Mirror of https://github.com/valitydev/salt.git (synced 2024-11-08 09:23:56 +00:00)

commit 3505ca2cc8: Merge branch 'develop' of github.com:saltstack/salt into develop

.gitignore (vendored): 1 change
@@ -40,6 +40,7 @@ htmlcov/
/.idea
/.ropeproject
.ropeproject
*_flymake.py
/*.iml
*.sublime-project
*.sublime-workspace
@@ -4844,3 +4844,27 @@ source_file = _build/locale/topics/releases/2014.1.11.pot
source_lang = en
source_name = topics/releases/2014.1.11.rst

[salt.ref--cli--salt-unity]
file_filter = locale/<lang>/LC_MESSAGES/ref/cli/salt-unity.po
source_file = _build/locale/ref/cli/salt-unity.pot
source_lang = en
source_name = ref/cli/salt-unity.rst

[salt.topics--development--architecture]
file_filter = locale/<lang>/LC_MESSAGES/topics/development/architecture.po
source_file = _build/locale/topics/development/architecture.pot
source_lang = en
source_name = topics/development/architecture.rst

[salt.topics--releases--2014_1_12]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/2014.1.12.po
source_file = _build/locale/topics/releases/2014.1.12.pot
source_lang = en
source_name = topics/releases/2014.1.12.rst

[salt.topics--releases--2014_1_13]
file_filter = locale/<lang>/LC_MESSAGES/topics/releases/2014.1.13.po
source_file = _build/locale/topics/releases/2014.1.13.pot
source_lang = en
source_name = topics/releases/2014.1.13.rst
doc/_templates/version.html (vendored): 2 changes
@@ -10,5 +10,5 @@
<p>Latest Salt release: <a href="{{ pathto('topics/releases/{0}'.format(release)) }}">{{ release }}</a></p>

<p>Try the shiny new release candidate of Salt,
-<a href="{{ pathto('topics/releases/2014.7.0') }}">v2014.7.0rc6</a>! More info
+<a href="{{ pathto('topics/releases/2014.7.0') }}">v2014.7.0rc7</a>! More info
<a href="{{ pathto('topics/releases/releasecandidate') }}">here</a>.</p>
@@ -240,6 +240,9 @@ distro the minion is running, in case they differ from the example below.
Windows
*******

For Windows machines, restarting the minion can be accomplished by
adding the following state:

.. code-block:: yaml

    schedule-start:
@@ -39,20 +39,20 @@ Options

.. option:: --async

-   Instead of waiting for the job to run on minions only print the jod id of
+   Instead of waiting for the job to run on minions only print the job id of
    the started execution and complete.

.. option:: --state-output=STATE_OUTPUT

    .. versionadded:: 0.17

-   Override the configured state_output value for minion output. Default:
-   full
+   Choose the format of the state output. The options are `full`,
+   `terse`, `mixed`, `changes` and `filter`. Default: full

.. option:: --subset=SUBSET

    Execute the routine on a random subset of the targeted minions. The
    minions will be verified that they have the named function before
    executing.

.. option:: -v VERBOSE, --verbose
@@ -75,7 +75,9 @@ Options

.. option:: -a EAUTH, --auth=EAUTH

-   Pass in an external authentication medium to validate against. The
-   credentials will be prompted for. Can be used with the -T option.
+   Pass in an external authentication medium to validate against. The
+   credentials will be prompted for. The options are `auto`,
+   `keystone`, `ldap`, `pam` and `stormpath`. Can be used with the -T
+   option.

.. option:: -T, --make-token
@@ -85,9 +87,13 @@ Options

.. option:: --return=RETURNER

-   Chose an alternative returner to call on the minion, if an alternative
-   returner is used then the return will not come back to the command line
-   but will be sent to the specified return system.
+   Choose an alternative returner to call on the minion, if an
+   alternative returner is used then the return will not come back to
+   the command line but will be sent to the specified return system.
+   The options are `carbon`, `cassandra`, `couchbase`, `couchdb`,
+   `elasticsearch`, `etcd`, `hipchat`, `local`, `local_cache`,
+   `memcache`, `mongo`, `mysql`, `odbc`, `postgres`, `redis`,
+   `sentry`, `slack`, `sms`, `smtp`, `sqlite3`, `syslog` and `xmpp`.
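The same returner selection is available from the Python client API. A minimal sketch (assuming a configured `redis` returner; any returner from the list above works the same way):

.. code-block:: python

    import salt.client

    local = salt.client.LocalClient()
    # 'ret' names the returner; results are shipped to it in addition
    # to coming back to the caller
    local.cmd('*', 'test.ping', ret='redis')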
.. option:: -d, --doc, --documentation

@@ -95,8 +101,8 @@ Options

.. option:: --args-separator=ARGS_SEPARATOR

    Set the special argument used as a delimiter between command arguments of
    compound commands. This is useful when one wants to pass commas as
    arguments to some of the commands in a compound command.

.. include:: _includes/logging-options.rst
@@ -202,11 +202,11 @@ a state that has not yet been executed. The state containing the ``prereq``
requisite is defined as the pre-requiring state. The state specified in the
``prereq`` statement is defined as the pre-required state.

-When ``prereq`` is called, the pre-required state reports if it expects to
-have any changes. It does this by running the pre-required single state as a
-test-run by enabling ``test=True``. This test-run will return a dictionary
-containing a key named "changes". (See the ``watch`` section above for
-examples of "changes" dictionaries.)
+When a ``prereq`` requisite is evaluated, the pre-required state reports if it
+expects to have any changes. It does this by running the pre-required single
+state as a test-run by enabling ``test=True``. This test-run will return a
+dictionary containing a key named "changes". (See the ``watch`` section above
+for examples of "changes" dictionaries.)

If the "changes" key contains a populated dictionary, it means that the
pre-required state expects changes to occur when the state is actually
@@ -315,8 +315,7 @@ different from the base must be specified of the alternates:
        'python': 'dev-python/mysql-python',
    },
},
-merge=salt['pillar.get']('mysql:lookup'),
-base=default) %}
+merge=salt['pillar.get']('mysql:lookup'), base=default) %}


Overriding values in the lookup table
@@ -338,6 +337,26 @@ Pillar would replace the ``config`` value from the call above.
    lookup:
        config: /usr/local/etc/mysql/my.cnf

.. note:: Protecting Expansion of Content with Special Characters

    When templating, keep in mind that YAML has special characters for
    quoting, flows, and other structure. When a Jinja substitution may
    contain special characters that YAML would parse incorrectly, the
    expansion must be protected by quoting. It is good policy to quote
    all Jinja expansions, especially when values may originate from
    Pillar. Salt provides a Jinja filter for doing just this:
    ``yaml_dquote``

    .. code-block:: jinja

        {%- set baz = '"The quick brown fox . . ."' %}
        {%- set zap = 'The word of the day is "salty".' %}

        {%- load_yaml as foo %}
        bar: {{ baz|yaml_dquote }}
        zip: {{ zap|yaml_dquote }}
        {%- endload %}

Single-purpose SLS files
------------------------
Binary file not shown.
@@ -107,6 +107,10 @@ try:
    PAM_AUTHENTICATE = LIBPAM.pam_authenticate
    PAM_AUTHENTICATE.restype = c_int
    PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]

+   PAM_END = LIBPAM.pam_end
+   PAM_END.restype = c_int
+   PAM_END.argtypes = [PamHandle, c_int]
except Exception:
    HAS_PAM = False
else:

@@ -155,9 +159,11 @@ def authenticate(username, password, service='login'):
    if retval != 0:
        # TODO: This is not an authentication error, something
        # has gone wrong starting up PAM
+       PAM_END(handle, retval)
        return False

    retval = PAM_AUTHENTICATE(handle, 0)
+   PAM_END(handle, 0)
    return retval == 0
salt/auth/yubico.py (new file, 91 lines)

@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-

'''
Provide authentication using YubiKey

To get your YubiKey API key you will need to visit the website below.

https://upgrade.yubico.com/getapikey/

The resulting page will show the generated Client ID (aka AuthID or API ID)
and the generated API key (Secret Key). Make a note of both and use these
two values in your /etc/salt/master configuration.

/etc/salt/master

.. code-block:: yaml

    yubico_users:
      damian:
        id: 12345
        key: ABCDEFGHIJKLMNOPQRSTUVWXYZ


.. code-block:: yaml

    external_auth:
      yubico:
        damian:
          - test.*


Please wait five to ten minutes after generating the key before testing so
that the API key will be updated on all the YubiCloud servers.

:depends: - yubico-client Python module
'''
import logging

log = logging.getLogger(__name__)

try:
    from yubico_client import Yubico, yubico_exceptions
    HAS_YUBICO = True
except ImportError:
    HAS_YUBICO = False


def __get_yubico_users(username):
    '''
    Grab the YubiKey Client ID & Secret Key
    '''
    user = {}

    try:
        if __opts__['yubico_users'].get(username, None):
            # read the two fields explicitly; unpacking dict.values()
            # would depend on dictionary ordering
            user['id'] = __opts__['yubico_users'][username]['id']
            user['key'] = __opts__['yubico_users'][username]['key']
        else:
            return None
    except KeyError:
        return None

    return user


def auth(username, password):
    '''
    Authenticate against the yubico server
    '''
    _cred = __get_yubico_users(username)
    if _cred is None:
        # unknown user, or incomplete yubico_users entry
        return False

    client = Yubico(_cred['id'], _cred['key'])

    try:
        if client.verify(password):
            return True
        else:
            return False
    except yubico_exceptions.StatusCodeError as e:
        log.info('Unable to verify YubiKey `{0}`'.format(e))
        return False


if __name__ == '__main__':
    __opts__ = {'yubico_users': {'damian': {'id': '12345', 'key': 'ABC123'}}}

    if auth('damian', 'OPT'):
        print "Authenticated"
    else:
        print "Failed to authenticate"
@@ -8,6 +8,7 @@ from __future__ import print_function
import logging
import os
import sys
+from glob import glob

# Import salt libs
import salt.cli.caller

@@ -20,6 +21,7 @@ import salt.output
import salt.runner
import salt.auth
import salt.key
+from salt.config import _expand_glob_path

from salt.utils import parsers, print_cli
from salt.utils.verify import check_user, verify_env, verify_files
@@ -167,7 +169,6 @@ class SaltCMD(parsers.SaltCMDOptionParser):
retcodes = []
try:
    # local will be None when there was an error
-   errors = []
    if local:
        if self.options.subset:
            cmd_func = local.cmd_subset

@@ -193,20 +194,16 @@ class SaltCMD(parsers.SaltCMDOptionParser):
    kwargs['verbose'] = True
ret = {}
for full_ret in cmd_func(**kwargs):
-   try:
-       ret_, out, retcode = self._format_ret(full_ret)
-       retcodes.append(retcode)
-       self._output_ret(ret_, out)
-       ret.update(ret_)
-   except KeyError:
-       errors.append(full_ret)
+   ret_, out, retcode = self._format_ret(full_ret)
+   retcodes.append(retcode)
+   self._output_ret(ret_, out)
+   ret.update(ret_)

# Returns summary
if self.config['cli_summary'] is True:
    if self.config['fun'] != 'sys.doc':
        if self.options.output is None:
            self._print_returns_summary(ret)
-           self._print_errors_summary(errors)

# NOTE: Return code is set here based on if all minions
# returned 'ok' with a retcode of 0.

@@ -220,15 +217,6 @@ class SaltCMD(parsers.SaltCMDOptionParser):
    out = ''
    self._output_ret(ret, out)

-def _print_errors_summary(self, errors):
-   if errors:
-       print_cli('\n')
-       print_cli('---------------------------')
-       print_cli('Errors')
-       print_cli('---------------------------')
-       for minion in errors:
-           print_cli(self._format_error(minion))

def _print_returns_summary(self, ret):
    '''
    Display returns summary

@@ -281,12 +269,6 @@ class SaltCMD(parsers.SaltCMDOptionParser):
    retcode = data['retcode']
    return ret, out, retcode

-def _format_error(self, minion_error):
-   for minion, error_doc in minion_error.items():
-       error = 'Minion [{0}] encountered exception \'{1}\''.format(minion, error_doc['exception']['message'])
-   return error

def _print_docs(self, ret):
    '''
    Print out the docstrings for all of the functions on the minions

@@ -418,12 +400,12 @@ class SaltCall(parsers.SaltCallOptionParser):
if self.options.file_root:
    # check if the argument is pointing to a file on disk
    file_root = os.path.abspath(self.options.file_root)
-   self.config['file_roots'] = {'base': [file_root]}
+   self.config['file_roots'] = {'base': _expand_glob_path([file_root])}

if self.options.pillar_root:
    # check if the argument is pointing to a file on disk
    pillar_root = os.path.abspath(self.options.pillar_root)
-   self.config['pillar_roots'] = {'base': [pillar_root]}
+   self.config['pillar_roots'] = {'base': _expand_glob_path([pillar_root])}

if self.options.local:
    self.config['file_client'] = 'local'
@@ -156,7 +156,7 @@ class Batch(object):
# add all minions that belong to this iterator and
# that have not responded to parts{} with an empty response
for minion in minion_tracker[queue]['minions']:
-   if minion not in parts.keys():
+   if minion not in parts:
        parts[minion] = {}
        parts[minion]['ret'] = {}

@@ -180,7 +180,7 @@ class Batch(object):
                          self.opts)

# remove inactive iterators from the iters list
-for queue in minion_tracker.keys():
+for queue in minion_tracker:
    # only remove inactive queues
    if not minion_tracker[queue]['active'] and queue in iters:
        iters.remove(queue)
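Dict membership and iteration are the common thread in these cleanups: in
Python 2, ``d.keys()`` builds a list only to be scanned or iterated once. A
minimal illustration of the equivalent direct forms:

.. code-block:: python

    parts = {'minion1': {'ret': {}}}

    'minion1' in parts    # same result as 'minion1' in parts.keys(),
                          # without building an intermediate list
    for name in parts:    # iterates the keys directly
        pass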
@@ -255,8 +255,11 @@ class RAETCaller(ZeroMQCaller):
    '''
    Pass in the command line options
    '''
-   self.stack = self._setup_caller_stack(opts)
+   stack, estatename, yardname = self._setup_caller_stack(opts)
+   self.stack = stack
    salt.transport.jobber_stack = self.stack
+   #salt.transport.jobber_estate_name = estatename
+   #salt.transport.jobber_yard_name = yardname

    super(RAETCaller, self).__init__(opts)

@@ -307,8 +310,8 @@ class RAETCaller(ZeroMQCaller):
        raise ValueError(emsg)

    sockdirpath = opts['sock_dir']
-   name = 'caller' + nacling.uuid(size=18)
-   stack = LaneStack(name=name,
+   stackname = 'caller' + nacling.uuid(size=18)
+   stack = LaneStack(name=stackname,
                      lanename=lanename,
                      sockdirpath=sockdirpath)

@@ -318,4 +321,11 @@ class RAETCaller(ZeroMQCaller):
                      lanename=lanename,
                      dirpath=sockdirpath))
    log.debug("Created Caller Jobber Stack {0}\n".format(stack.name))
-   return stack
+
+   # name of Road Estate for this caller
+   estatename = "{0}_{1}".format(role, kind)
+   # name of Yard for this caller
+   yardname = stack.local.name
+
+   # return identifiers needed to route back to this caller's master
+   return (stack, estatename, yardname)
@@ -768,7 +768,7 @@ class LocalClient(object):
    # get the info from the cache
    ret = self.get_cache_returns(jid)
    if ret != {}:
-       found.update(set(ret.keys()))
+       found.update(set(ret))
        yield ret

    # if you have all the returns, stop

@@ -778,7 +778,7 @@ class LocalClient(object):
    # otherwise, get them from the event system
    for event in event_iter:
        if event != {}:
-           found.update(set(event.keys()))
+           found.update(set(event))
            yield event
        if len(found.intersection(minions)) >= len(minions):
            raise StopIteration()

@@ -787,8 +787,7 @@ class LocalClient(object):
    def get_returns_no_block(
            self,
            jid,
-           event=None,
-           gather_errors=False):
+           event=None):
        '''
        Raw function to just return events of jid excluding timeout logic

@@ -798,30 +797,17 @@ class LocalClient(object):
            event = self.event
        while True:
            if HAS_ZMQ:
-               if not gather_errors:
-                   try:
-                       raw = event.get_event_noblock()
-                       if raw and raw.get('tag', '').startswith(jid):
-                           yield raw
-                       else:
-                           yield None
-                   except zmq.ZMQError as ex:
-                       if ex.errno == errno.EAGAIN or ex.errno == errno.EINTR:
-                           yield None
-                       else:
-                           raise
-               else:
-                   try:
-                       raw = event.get_event_noblock()
-                       if raw and (raw.get('tag', '').startswith(jid) or raw.get('tag', '').startswith('_salt_error')):
-                           yield raw
-                       else:
-                           yield None
-                   except zmq.ZMQError as ex:
-                       if ex.errno == errno.EAGAIN or ex.errno == errno.EINTR:
-                           yield None
-                       else:
-                           raise
+               try:
+                   raw = event.get_event_noblock()
+                   if raw and raw.get('tag', '').startswith(jid):
+                       yield raw
+                   else:
+                       yield None
+               except zmq.ZMQError as ex:
+                   if ex.errno == errno.EAGAIN or ex.errno == errno.EINTR:
+                       yield None
+                   else:
+                       raise
            else:
                raw = event.get_event_noblock()
                if raw and raw.get('tag', '').startswith(jid):

@@ -837,7 +823,6 @@ class LocalClient(object):
            tgt='*',
            tgt_type='glob',
            expect_minions=False,
-           gather_errors=True,
            **kwargs):
        '''
        Watch the event system and return job data as it comes in

@@ -868,7 +853,7 @@ class LocalClient(object):
        syndic_wait = 0
        last_time = False
        # iterator for this job's return
-       ret_iter = self.get_returns_no_block(jid, gather_errors=gather_errors)
+       ret_iter = self.get_returns_no_block(jid)
        # iterator for the info of this job
        jinfo_iter = []
        timeout_at = time.time() + timeout

@@ -886,10 +871,7 @@ class LocalClient(object):
        # if we got None, then there were no events
        if raw is None:
            break
-       if gather_errors:
-           if raw['tag'] == '_salt_error':
-               ret = {raw['data']['id']: raw['data']['data']}
-               yield ret

        if 'minions' in raw.get('data', {}):
            minions.update(raw['data']['minions'])
            continue

@@ -1078,7 +1060,7 @@ class LocalClient(object):
        ret[minion] = m_data

    # if we have all the minion returns, lets just return
-   if len(set(ret.keys()).intersection(minions)) >= len(minions):
+   if len(set(ret).intersection(minions)) >= len(minions):
        return ret

    # otherwise lets use the listener we created above to get the rest

@@ -1094,7 +1076,7 @@ class LocalClient(object):
        ret[minion] = m_data

    # are we done yet?
-   if len(set(ret.keys()).intersection(minions)) >= len(minions):
+   if len(set(ret).intersection(minions)) >= len(minions):
        return ret

    # otherwise we hit the timeout, return what we have
@@ -441,7 +441,7 @@ class SSH(object):
sret = {}
outputter = self.opts.get('output', 'nested')
for ret in self.handle_ssh():
-   host = ret.keys()[0]
+   host = ret.iterkeys().next()
    self.cache_job(jid, host, ret[host])
    ret = self.key_deploy(host, ret)
    if not isinstance(ret[host], dict):
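``iterkeys().next()``, used above, is the Python 2 way to fetch a single key
without materializing the whole key list, which matters when the dict is a
one-entry return envelope like the one handled here. A minimal illustration:

.. code-block:: python

    ret = {'web1': {'retcode': 0}}

    ret.iterkeys().next()   # 'web1', no intermediate list (Python 2)
    next(iter(ret))         # the Python 2 and 3 compatible spelling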
@@ -177,7 +177,7 @@ class CloudClient(object):
    self.opts['providers'].update({name: {driver: provider}})
for name, profile in pillars.pop('profiles', {}).items():
    provider = profile['provider'].split(':')[0]
-   driver = self.opts['providers'][provider].keys()[0]
+   driver = self.opts['providers'][provider].iterkeys().next()
    profile['provider'] = '{0}:{1}'.format(provider, driver)
    profile['profile'] = name
    self.opts['profiles'].update({name: profile})

@@ -214,7 +214,7 @@ class CloudClient(object):
    # also filter them to speedup methods like
    # __filter_non_working_providers
    providers = [a.get('provider', '').split(':')[0]
-                for a in opts['profiles'].values()
+                for a in opts['profiles'].itervalues()
                 if a.get('provider', '')]
    if providers:
        _providers = opts.get('providers', {})

@@ -367,7 +367,7 @@ class CloudClient(object):
    mapper = salt.cloud.Map(self._opts_defaults())
    providers = self.opts['providers']
    if provider in providers:
-       provider += ':{0}'.format(providers[provider].keys()[0])
+       provider += ':{0}'.format(providers[provider].iterkeys().next())
    else:
        return False
    if isinstance(names, str):

@@ -400,7 +400,7 @@ class CloudClient(object):
    mapper = salt.cloud.Map(self._opts_defaults())
    providers = mapper.map_providers_parallel()
    if provider in providers:
-       provider += ':{0}'.format(providers[provider].keys()[0])
+       provider += ':{0}'.format(providers[provider].iterkeys().next())
    else:
        return False
    if isinstance(names, str):

@@ -1484,7 +1484,7 @@ class Cloud(object):
    Remove any mis-configured cloud providers from the available listing
    '''
    for alias, drivers in self.opts['providers'].copy().iteritems():
-       for driver in drivers.copy().keys():
+       for driver in drivers.copy():
            fun = '{0}.get_configured_provider'.format(driver)
            if fun not in self.clouds:
                # Mis-configured provider that got removed?

@@ -1538,7 +1538,7 @@ class Map(Cloud):
    interpolated_map = {}

    for profile, mapped_vms in rendered_map.items():
-       names = set(mapped_vms.keys())
+       names = set(mapped_vms)
        if profile not in self.opts['profiles']:
            if 'Errors' not in interpolated_map:
                interpolated_map['Errors'] = {}

@@ -1694,7 +1694,7 @@ class Map(Cloud):

    def _has_loop(self, dmap, seen=None, val=None):
        if seen is None:
-           for values in dmap['create'].values():
+           for values in dmap['create'].itervalues():
                seen = []
                try:
                    machines = values['requires']

@@ -2097,7 +2097,7 @@ class Map(Cloud):
    if self.opts['start_action']:
        actionlist = []
        grp = -1
-       for key, val in groupby(dmap['create'].values(),
+       for key, val in groupby(dmap['create'].itervalues(),
                                lambda x: x['level']):
            actionlist.append([])
            grp += 1

@@ -2117,7 +2117,7 @@ class Map(Cloud):
        timeout=self.opts['timeout'] * 60, expr_form='list'
    ))
    for obj in output_multip:
-       obj.values()[0]['ret'] = out[obj.keys()[0]]
+       obj.itervalues().next()['ret'] = out[obj.iterkeys().next()]
        output.update(obj)
    else:
        for obj in output_multip:
@@ -304,7 +304,7 @@ class SaltCloud(parsers.SaltCloudParser):
    log.info('Complete')

if dmap.get('existing', None):
-   for name in dmap['existing'].keys():
+   for name in dmap['existing']:
        ret[name] = {'Message': 'Already running'}

except (SaltCloudException, Exception) as exc:
@@ -102,7 +102,7 @@ def avail_locations(call=None):
    ret = {}
    for region in items['Regions']['Region']:
        ret[region['RegionId']] = {}
-       for item in region.keys():
+       for item in region:
            ret[region['RegionId']][item] = str(region[item])

    return ret

@@ -133,7 +133,7 @@ def avail_images(kwargs=None, call=None):
    ret = {}
    for image in items['Images']['Image']:
        ret[image['ImageId']] = {}
-       for item in image.keys():
+       for item in image:
            ret[image['ImageId']][item] = str(image[item])

    return ret

@@ -155,7 +155,7 @@ def avail_sizes(call=None):
    ret = {}
    for image in items['InstanceTypes']['InstanceType']:
        ret[image['InstanceTypeId']] = {}
-       for item in image.keys():
+       for item in image:
            ret[image['InstanceTypeId']][item] = str(image[item])

    return ret

@@ -192,7 +192,7 @@ def list_availability_zones(call=None):

    for zone in items['Zones']['Zone']:
        ret[zone['ZoneId']] = {}
-       for item in zone.keys():
+       for item in zone:
            ret[zone['ZoneId']][item] = str(zone[item])

    return ret

@@ -225,7 +225,7 @@ def list_nodes_min(call=None):

    for node in nodes['InstanceStatuses']['InstanceStatus']:
        ret[node['InstanceId']] = {}
-       for item in node.keys():
+       for item in node:
            ret[node['InstanceId']][item] = node[item]

    return ret

@@ -299,7 +299,7 @@ def list_nodes_full(call=None):
        'size': 'TODO',
        'state': items['Status']
    }
-   for item in items.keys():
+   for item in items:
        value = items[item]
        if value is not None:
            value = str(value)

@@ -350,7 +350,7 @@ def list_securitygroup(call=None):
    ret = {}
    for sg in result['SecurityGroups']['SecurityGroup']:
        ret[sg['SecurityGroupId']] = {}
-       for item in sg.keys():
+       for item in sg:
            ret[sg['SecurityGroupId']][item] = sg[item]

    return ret

@@ -368,7 +368,7 @@ def get_image(vm_):
    if not vm_image:
        raise SaltCloudNotFound('No image specified for this VM.')

-   if vm_image and str(vm_image) in images.keys():
+   if vm_image and str(vm_image) in images:
        return images[vm_image]['ImageId']
    raise SaltCloudNotFound(
        'The specified image, {0!r}, could not be found.'.format(vm_image)

@@ -387,7 +387,7 @@ def get_securitygroup(vm_):
    if not securitygroup:
        raise SaltCloudNotFound('No securitygroup ID specified for this VM.')

-   if securitygroup and str(securitygroup) in sgs.keys():
+   if securitygroup and str(securitygroup) in sgs:
        return sgs[securitygroup]['SecurityGroupId']
    raise SaltCloudNotFound(
        'The specified security group, {0!r}, could not be found.'.format(

@@ -407,7 +407,7 @@ def get_size(vm_):
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this VM.')

-   if vm_size and str(vm_size) in sizes.keys():
+   if vm_size and str(vm_size) in sizes:
        return sizes[vm_size]['InstanceTypeId']

    raise SaltCloudNotFound(

@@ -427,7 +427,7 @@ def __get_location(vm_):
    if not vm_location:
        raise SaltCloudNotFound('No location specified for this VM.')

-   if vm_location and str(vm_location) in locations.keys():
+   if vm_location and str(vm_location) in locations:
        return locations[vm_location]['RegionId']
    raise SaltCloudNotFound(
        'The specified location, {0!r}, could not be found.'.format(

@@ -779,7 +779,7 @@ def show_disk(name, call=None):

    for disk in items['Disks']['Disk']:
        ret[disk['DiskId']] = {}
-       for item in disk.keys():
+       for item in disk:
            ret[disk['DiskId']][item] = str(disk[item])

    return ret

@@ -817,7 +817,7 @@ def list_monitor_data(kwargs=None, call=None):

    for data in monitorData['InstanceMonitorData']:
        ret[data['InstanceId']] = {}
-       for item in data.keys():
+       for item in data:
            ret[data['InstanceId']][item] = str(data[item])

    return ret

@@ -892,7 +892,7 @@ def show_image(kwargs, call=None):

    for image in items['Images']['Image']:
        ret[image['ImageId']] = {}
-       for item in image.keys():
+       for item in image:
            ret[image['ImageId']][item] = str(image[item])

    return ret
@@ -111,7 +111,7 @@ def __virtual__():

# Let's bring the functions imported from libcloud_aws to the current
# namespace.
-keysdiff = set(POST_IMPORT_LOCALS_KEYS.keys()).difference(
+keysdiff = set(POST_IMPORT_LOCALS_KEYS).difference(
    PRE_IMPORT_LOCALS_KEYS
)
for key in keysdiff:
@@ -82,7 +82,7 @@ def avail_locations(call=None):
    ret = {}
    for region in items['regions']:
        ret[region['name']] = {}
-       for item in region.keys():
+       for item in region:
            ret[region['name']][item] = str(region[item])

    return ret

@@ -102,7 +102,7 @@ def avail_images(call=None):
    ret = {}
    for image in items['images']:
        ret[image['id']] = {}
-       for item in image.keys():
+       for item in image:
            ret[image['id']][item] = str(image[item])

    return ret

@@ -122,7 +122,7 @@ def avail_sizes(call=None):
    ret = {}
    for size in items['sizes']:
        ret[size['name']] = {}
-       for item in size.keys():
+       for item in size:
            ret[size['name']][item] = str(size[item])

    return ret

@@ -165,7 +165,7 @@ def list_nodes_full(call=None):
    ret = {}
    for node in items['droplets']:
        ret[node['name']] = {}
-       for item in node.keys():
+       for item in node:
            value = node[item]
            if value is not None:
                value = str(value)

@@ -598,7 +598,7 @@ def list_keypairs(call=None):
    ret = {}
    for keypair in items['ssh_keys']:
        ret[keypair['name']] = {}
-       for item in keypair.keys():
+       for item in keypair:
            ret[keypair['name']][item] = str(keypair[item])

    return ret
@@ -226,7 +226,7 @@ def _xml_to_dict(xmltree):
    if '}' in name:
        comps = name.split('}')
        name = comps[1]
-   if name not in xmldict.keys():
+   if name not in xmldict:
        if sys.version_info < (2, 7):
            children_len = len(item.getchildren())
        else:

@@ -329,7 +329,7 @@ def query(params=None, setname=None, requesturl=None, location=None,
    params_with_headers['SignatureMethod'] = 'HmacSHA256'
    params_with_headers['Timestamp'] = '{0}'.format(timestamp)
    params_with_headers['Version'] = ec2_api_version
-   keys = sorted(params_with_headers.keys())
+   keys = sorted(params_with_headers)
    values = map(params_with_headers.get, keys)
    querystring = urllib.urlencode(list(zip(keys, values)))
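``sorted`` accepts any iterable, so passing the mapping itself sorts its
keys; ``.keys()`` only added an intermediate list in Python 2. A minimal
illustration with hypothetical request parameters:

.. code-block:: python

    params = {'Version': '2014-06-15', 'Action': 'DescribeInstances'}

    sorted(params)          # ['Action', 'Version']: the keys, sorted
    sorted(params.keys())   # identical result, one extra list in Python 2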
@@ -928,7 +928,7 @@ def get_availability_zone(vm_):
    zones = _list_availability_zones()

    # Validate user-specified AZ
-   if avz not in zones.keys():
+   if avz not in zones:
        raise SaltCloudException(
            'The specified availability zone isn\'t valid in this region: '
            '{0}\n'.format(

@@ -1987,7 +1987,7 @@ def create(vm_=None, call=None):
        '\'tag\' should be a dict.'
    )

-   for value in tags.values():
+   for value in tags.itervalues():
        if not isinstance(value, str):
            raise SaltCloudConfigError(
                '\'tag\' values must be strings. Try quoting the values. '

@@ -2663,7 +2663,7 @@ def list_nodes_full(location=None, call=None):
    if not location:
        ret = {}
        locations = set(
-           get_location(vm_) for vm_ in __opts__['profiles'].values()
+           get_location(vm_) for vm_ in __opts__['profiles'].itervalues()
            if _vm_provider_driver(vm_)
        )
        for loc in locations:

@@ -3524,10 +3524,10 @@ def get_console_output(
    ret = {}
    data = query(params, return_root=True)
    for item in data:
-       if item.keys()[0] == 'output':
-           ret['output_decoded'] = binascii.a2b_base64(item.values()[0])
+       if item.iterkeys().next() == 'output':
+           ret['output_decoded'] = binascii.a2b_base64(item.itervalues().next())
        else:
-           ret[item.keys()[0]] = item.values()[0]
+           ret[item.iterkeys().next()] = item.itervalues().next()

    return ret
@@ -42,66 +42,6 @@ Setting up Service Account Authentication:
  /etc/salt/cloud file as 'service_account_private_key' setting.
- Consider using a more secure location for your private key.

Supported commands:

# Create a few instances from profile_name in /etc/salt/cloud.profiles
- salt-cloud -p profile_name inst1 inst2 inst3
# Delete an instance
- salt-cloud -d inst1
# Look up data on an instance
- salt-cloud -a show_instance inst2
# List available locations (aka 'zones') for provider 'gce'
- salt-cloud --list-locations gce
# List available instance sizes (aka 'machine types') for provider 'gce'
- salt-cloud --list-sizes gce
# List available images for provider 'gce'
- salt-cloud --list-images gce
# Create a persistent disk
- salt-cloud -f create_disk gce disk_name=pd location=us-central1-b ima...
# Permanently delete a persistent disk
- salt-cloud -f delete_disk gce disk_name=pd
# Attach an existing disk to an existing instance
- salt-cloud -a attach_disk myinstance disk_name=mydisk mode=READ_ONLY
# Detach a disk from an instance
- salt-cloud -a detach_disk myinstance disk_name=mydisk
# Show information about the named disk
- salt-cloud -a show_disk myinstance disk_name=pd
- salt-cloud -f show_disk gce disk_name=pd
# Create a snapshot of a persistent disk
- salt-cloud -f create_snapshot gce name=snap-1 disk_name=pd
# Permanently delete a disk snapshot
- salt-cloud -f delete_snapshot gce name=snap-1
# Show information about the named snapshot
- salt-cloud -f show_snapshot gce name=snap-1
# Create a network
- salt-cloud -f create_network gce name=mynet cidr=10.10.10.0/24
# Delete a network
- salt-cloud -f delete_network gce name=mynet
# Show info for a network
- salt-cloud -f show_network gce name=mynet
# Create a firewall rule
- salt-cloud -f create_fwrule gce name=fw1 network=mynet allow=tcp:80
# Delete a firewall rule
- salt-cloud -f delete_fwrule gce name=fw1
# Show info for a firewall rule
- salt-cloud -f show_fwrule gce name=fw1
# Create a load-balancer HTTP health check
- salt-cloud -f create_hc gce name=hc path=/ port=80
# Delete a load-balancer HTTP health check
- salt-cloud -f delete_hc gce name=hc
# Show info about an HTTP health check
- salt-cloud -f show_hc gce name=hc
# Create a load-balancer configuration
- salt-cloud -f create_lb gce name=lb region=us-central1 ports=80 ...
# Delete a load-balancer configuration
- salt-cloud -f delete_lb gce name=lb
# Show details about load-balancer
- salt-cloud -f show_lb gce name=lb
# Add member to load-balancer
- salt-cloud -f attach_lb gce name=lb member=www1
# Remove member from load-balancer
- salt-cloud -f detach_lb gce name=lb member=www1

.. code-block:: yaml

    my-gce-config:
@@ -126,7 +126,7 @@ def get_image(vm_):

    vm_image = config.get_cloud_config_value('image', vm_, __opts__)

-   if vm_image and str(vm_image) in images.keys():
+   if vm_image and str(vm_image) in images:
        return images[vm_image]

    raise SaltCloudNotFound(

@@ -143,7 +143,7 @@ def get_size(vm_):
    if not vm_size:
        raise SaltCloudNotFound('No size specified for this VM.')

-   if vm_size and str(vm_size) in sizes.keys():
+   if vm_size and str(vm_size) in sizes:
        return sizes[vm_size]

    raise SaltCloudNotFound(

@@ -697,7 +697,7 @@ def get_node(name):
    :return: node object
    '''
    nodes = list_nodes()
-   if name in nodes.keys():
+   if name in nodes:
        return nodes[name]
    return None

@@ -717,7 +717,7 @@ def joyent_node_state(id_):
        'deleted': 2,
        'unknown': 4}

-   if id_ not in states.keys():
+   if id_ not in states:
        id_ = 'unknown'

    return node_state(states[id_])

@@ -747,16 +747,16 @@ def reformat_node(item=None, full=False):

    # add any undefined desired keys
    for key in desired_keys:
-       if key not in item.keys():
+       if key not in item:
            item[key] = None

    # remove all the extra key value pairs to provide a brief listing
    if not full:
-       for key in item.keys():
+       for key in item:
            if key not in desired_keys:
                del item[key]

-   if 'state' in item.keys():
+   if 'state' in item:
        item['state'] = joyent_node_state(item['state'])

    return item
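A caution on the key-pruning loop above: unlike the other conversions in this
commit, it deletes entries while iterating. In Python 2 ``item.keys()``
returned a list copy, so the original was safe; iterating the dict directly
while deleting raises ``RuntimeError: dictionary changed size during
iteration``. A minimal sketch of the safe direct form:

.. code-block:: python

    item = {'state': 'running', 'extra': 1}
    desired_keys = ['state']

    for key in list(item):        # snapshot the keys before mutating
        if key not in desired_keys:
            del item[key]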
@@ -779,7 +779,7 @@ def list_nodes(full=False, call=None):

    ret = {}
    if POLL_ALL_LOCATIONS:
-       for location in JOYENT_LOCATIONS.keys():
+       for location in JOYENT_LOCATIONS:
            result = query(command='my/machines', location=location,
                           method='GET')
            nodes = result[1]
@@ -225,7 +225,7 @@ def get_image(conn, vm_):
        'ascii', 'salt-cloud-force-ascii'
    )

-   for img in image_list.keys():
+   for img in image_list:
        if vm_image in (image_list[img]['id'], img):
            return image_list[img]['id']

@@ -783,7 +783,7 @@ def list_nodes(call=None, **kwargs):

    if not server_list:
        return {}
-   for server in server_list.keys():
+   for server in server_list:
        server_tmp = conn.server_show(server_list[server]['id'])[server]
        ret[server] = {
            'id': server_tmp['id'],

@@ -815,7 +815,7 @@ def list_nodes_full(call=None, **kwargs):

    if not server_list:
        return {}
-   for server in server_list.keys():
+   for server in server_list:
        try:
            ret[server] = conn.server_show_libcloud(
                server_list[server]['id']
@@ -721,7 +721,7 @@ def list_nodes(call=None):
            nodes['error']['Errors']['Error']['Message']
        )
    )
-   for node in nodes.keys():
+   for node in nodes:
        ret[node] = {
            'id': nodes[node]['hostname'],
            'ram': nodes[node]['memoryCount'],
@@ -305,7 +305,7 @@ def _deploy(vm_):
    '''
    # TODO: review salt.utils.cloud.bootstrap(vm_, __opts__)
    # TODO: review salt.utils.cloud.wait_for_ip
-   ip_address = wait_for_ip(vm_['name'])
+   ip_address = wait_for_ip(vm_)

    template_user = config.get_cloud_config_value(
        'template_user', vm_, __opts__
@@ -456,7 +456,7 @@ def list_nodes_full(conn=None, call=None):
    ret = {}
    for node in nodes:
        pairs = {}
-       for key, value in zip(node.__dict__.keys(), node.__dict__.values()):
+       for key, value in zip(node.__dict__, node.__dict__.itervalues()):
            pairs[key] = value
        ret[node.name] = pairs
        del ret[node.name]['driver']
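Zipping a mapping with its own ``itervalues`` leans on CPython's guarantee
that key order and value order correspond as long as the dict is not
modified in between; ``iteritems`` expresses the same pairing directly. A
minimal illustration:

.. code-block:: python

    d = {'name': 'web1', 'state': 'running'}

    list(zip(d, d.itervalues())) == list(d.iteritems())   # True in Python 2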
@@ -639,13 +639,25 @@ def _validate_file_roots(opts):
    if not isinstance(opts['file_roots'], dict):
        log.warning('The file_roots parameter is not properly formatted,'
                    ' using defaults')
-       return {'base': [salt.syspaths.BASE_FILE_ROOTS_DIR]}
+       return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
    for saltenv, dirs in list(opts['file_roots'].items()):
        if not isinstance(dirs, list) and not isinstance(dirs, tuple):
            opts['file_roots'][saltenv] = []
+       opts['file_roots'][saltenv] = _expand_glob_path(opts['file_roots'][saltenv])
    return opts['file_roots']


+def _expand_glob_path(file_roots):
+    '''
+    Applies shell globbing to a set of directories and returns
+    the expanded paths
+    '''
+    unglobbed_path = []
+    for path in file_roots:
+        unglobbed_path.extend(glob.glob(path))
+    return unglobbed_path
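A hedged sketch of what the new helper does, assuming /srv/salt/base and
/srv/salt/extra are the only matches on disk (hypothetical paths):

.. code-block:: python

    _expand_glob_path(['/srv/salt/*'])
    # -> ['/srv/salt/base', '/srv/salt/extra'], in glob order
    _expand_glob_path(['/srv/salt/base'])
    # -> ['/srv/salt/base']; a literal path passes through when it exists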
def _validate_opts(opts):
    '''
    Check that all of the types of values passed into the config are

@@ -1309,7 +1321,7 @@ def old_to_new(opts):
    for provider in providers:

        provider_config = {}
-       for opt in opts.keys():
+       for opt in opts:
            if not opt.startswith(provider):
                continue
            value = opts.pop(opt)

@@ -1405,7 +1417,7 @@ def apply_vm_profiles_config(providers, overrides, defaults=None):
        vms.pop(profile)
        continue

-   driver = providers[details['provider']].keys()[0]
+   driver = providers[details['provider']].iterkeys().next()
    providers[details['provider']][driver].setdefault(
        'profiles', {}).update({profile: details})
    details['provider'] = '{0[provider]}:{1}'.format(details, driver)

@@ -1440,7 +1452,7 @@ def apply_vm_profiles_config(providers, overrides, defaults=None):
        vms.pop(profile)
        continue

-   driver = providers[extended['provider']].keys()[0]
+   driver = providers[extended['provider']].iterkeys().next()
    providers[extended['provider']][driver].setdefault(
        'profiles', {}).update({profile: extended})

@@ -1757,7 +1769,7 @@ def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
    if vm_['provider'] in opts['providers']:
        # There's only one driver defined for this provider. This is safe.
        alias_defs = opts['providers'].get(vm_['provider'])
-       provider_driver_defs = alias_defs[alias_defs.keys()[0]]
+       provider_driver_defs = alias_defs[alias_defs.iterkeys().next()]
        if name in provider_driver_defs:
            # The setting name exists in the VM's provider configuration.
            # Return it!
@@ -14,6 +14,7 @@ import multiprocessing
import traceback
import itertools
from collections import deque
+import random

# Import salt libs
import salt.daemons.masterapi

@@ -275,7 +276,8 @@ class SaltRaetRoadStackJoined(ioflo.base.deeding.Deed):
    joined = False
    if stack and isinstance(stack, RoadStack):
        if stack.remotes:
-           joined = stack.remotes.values()[0].joined
+           for remote in stack.remotes.values():
+               joined = any([remote.joined for remote in stack.remotes.values()])
    self.status.update(joined=joined)

@@ -305,7 +307,7 @@ class SaltRaetRoadStackRejected(ioflo.base.deeding.Deed):
    rejected = False
    if stack and isinstance(stack, RoadStack):
        if stack.remotes:
-           rejected = (stack.remotes.values()[0].acceptance
+           rejected = (stack.remotes.itervalues().next().acceptance
                        == raeting.acceptances.rejected)
        else:  # no remotes so assume rejected
            rejected = True

@@ -361,7 +363,8 @@ class SaltRaetRoadStackAllowed(ioflo.base.deeding.Deed):
    allowed = False
    if stack and isinstance(stack, RoadStack):
        if stack.remotes:
-           allowed = stack.remotes.values()[0].allowed
+           for remote in stack.remotes.values():
+               allowed = any([remote.allowed for remote in stack.remotes.values()])
    self.status.update(allowed=allowed)
@@ -521,10 +524,22 @@ class SaltLoadPillar(ioflo.base.deeding.Deed):
    '''
    Initial pillar
    '''
-   # default master is the first remote
-   # this default destination will not work with multiple masters
+   # default master is the first remote that is allowed
+   available_masters = [remote for remote in self.road_stack.value.remotes.values()
+                        if remote.allowed]
+   while not available_masters:
+       available_masters = [remote for remote in self.road_stack.value.remotes.values()
+                            if remote.allowed]
+       time.sleep(0.1)
+
+   random_master = self.opts.value.get('random_master')
+   if random_master:
+       master = available_masters[random.randint(0, len(available_masters) - 1)]
+   else:
+       master = available_masters[0]

    route = {'src': (self.road_stack.value.local.name, None, None),
-            'dst': (self.road_stack.value.remotes.values()[0].name, None, 'remote_cmd')}
+            'dst': (self.road_stack.value.remotes.itervalues().next().name, None, 'remote_cmd')}
    load = {'id': self.opts.value['id'],
            'grains': self.grains.value,
            'saltenv': self.opts.value['environment'],
@@ -882,17 +897,30 @@ class SaltRaetRouter(ioflo.base.deeding.Deed):
    if not self.road_stack.value.remotes:
        log.error("Missing joined master. Unable to route "
                  "remote_cmd '{0}'.".format(msg))
-       d_estate = self.road_stack.value.remotes.values()[0].name
+       return
+   #log.error("**** Missing destination estate for 'remote_cmd'. Unable to route "
+   #          "remote_cmd '{0}'.".format(msg))
+   #return
+   d_estate = self.road_stack.value.remotes.itervalues().next().name
    msg['route']['dst'] = (d_estate, d_yard, d_share)
+   log.error("**** Missing destination estate for 'remote_cmd'. "
+             "Using default route={0}.".format(msg['route']['dst']))
    self.road_stack.value.message(msg,
                                  self.road_stack.value.nameRemotes[d_estate].uid)
-elif d_share == 'call_cmd':  # salt call minion to master
+elif d_share == 'call_cmd':  # salt call return pub to master
    if not self.road_stack.value.remotes:
        log.error("Missing joined master. Unable to route "
                  "call_cmd '{0}'.".format(msg))
-       d_estate = self.road_stack.value.remotes.values()[0].name
+       return
+   #log.error("**** Missing destination estate for 'call_cmd'. Unable to route "
+   #          "call_cmd '{0}'.".format(msg))
+   #return
+
+   d_estate = self.road_stack.value.remotes.itervalues().next().name
+   d_share = 'remote_cmd'
    msg['route']['dst'] = (d_estate, d_yard, d_share)
+   log.error("**** Missing destination estate for 'call_cmd'. "
+             "Using default route={0}.".format(msg['route']['dst']))
    self.road_stack.value.message(msg,
                                  self.road_stack.value.nameRemotes[d_estate].uid)
@@ -986,7 +1014,7 @@ class SaltRaetPublisher(ioflo.base.deeding.Deed):
    '''
    pub_data = pub_msg['return']
    # only publish to available minions by intersecting sets
-   minions = self.availables.value & set(self.stack.value.nameRemotes.keys())
+   minions = self.availables.value & set(self.stack.value.nameRemotes)
    for minion in minions:
        uid = self.stack.value.fetchUidByName(minion)
        if uid:
@@ -55,7 +55,7 @@ framer bootstrap be inactive first join

frame joined
    print Joined
-   go next
+   go next if elapsed >= 2

frame allow
    print Allowing...

@@ -69,14 +69,16 @@ framer bootstrap be inactive first join

frame allowed
    print Allowed
-   go next
+   go next if elapsed >= 2

frame pillar
    print Pillaring
+   enter
        do salt load pillar
    go loading

frame loading
    print Loading
+   enter
        do salt load modules
    go router
@@ -20,6 +20,13 @@ from raet.keeping import Keep

from salt.key import RaetKey

+# Python equivalent of an enum
+APPL_KINDS = OrderedDict([('master', 0), ('minion', 1), ('syndic', 2), ('call', 3)])
+APPL_KIND_NAMES = odict((v, k) for k, v in APPL_KINDS.iteritems())  # inverse map
+ApplKind = namedtuple('ApplKind', APPL_KINDS)
+applKinds = ApplKind(**APPL_KINDS)
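The OrderedDict/namedtuple pair gives name-to-number lookup, the inverse map,
and attribute-style access from one definition. A minimal illustration of the
resulting objects:

.. code-block:: python

    APPL_KINDS['minion']    # 1
    APPL_KIND_NAMES[1]      # 'minion', via the inverse map
    applKinds.minion        # 1, attribute access through the namedtuple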

class SaltKeep(Keep):
    '''
    RAET protocol estate on road data persistence for a given estate

@@ -105,8 +112,7 @@ class SaltKeep(Keep):
        return None

    mid = data['role']
-   statae = raeting.ACCEPTANCES.keys()
-   for status in statae:
+   for status in raeting.ACCEPTANCES:
        keydata = self.saltRaetKey.read_remote(mid, status)
        if keydata:
            break
@@ -326,7 +326,7 @@ class BasicTestCase(unittest.TestCase):
    self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
    self.assertEqual(main.local.signer.keyhex, mainData['sighex'])

-   self.assertEqual(len(main.remotes.values()), 2)
+   self.assertEqual(len(main.remotes), 2)

    # other stack
    opts = self.createOpts(role='other',

@@ -564,7 +564,7 @@ class BasicTestCase(unittest.TestCase):
    self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
    self.assertEqual(main.local.signer.keyhex, mainData['sighex'])

-   self.assertEqual(len(main.remotes.values()), 2)
+   self.assertEqual(len(main.remotes), 2)

    # other stack
    opts = self.createOpts(role='other',

@@ -804,7 +804,7 @@ class BasicTestCase(unittest.TestCase):
    self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
    self.assertEqual(main.local.signer.keyhex, mainData['sighex'])

-   self.assertEqual(len(main.remotes.values()), 2)
+   self.assertEqual(len(main.remotes), 2)

    # other stack
    opts = self.createOpts(role='other',

@@ -1057,7 +1057,7 @@ class BasicTestCase(unittest.TestCase):
    self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
    self.assertEqual(main.local.signer.keyhex, mainData['sighex'])

-   self.assertEqual(len(main.remotes.values()), 2)
+   self.assertEqual(len(main.remotes), 2)
    for data in [data1, data2]:
        remote = main.nameRemotes[data['name']]
        self.assertEqual(remote.name, data['name'])

@@ -1200,7 +1200,7 @@ class BasicTestCase(unittest.TestCase):
    self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
    self.assertEqual(main.local.signer.keyhex, mainData['sighex'])

-   self.assertEqual(len(main.remotes.values()), 2)
+   self.assertEqual(len(main.remotes), 2)
    for data in [data1, data2]:
        remote = main.nameRemotes[data['name']]
        self.assertEqual(remote.name, data['name'])

@@ -1351,7 +1351,7 @@ class BasicTestCase(unittest.TestCase):
    self.assertEqual(main.local.priver.keyhex, mainData['prihex'])
    self.assertEqual(main.local.signer.keyhex, mainData['sighex'])

-   self.assertEqual(len(main.remotes.values()), 2)
+   self.assertEqual(len(main.remotes), 2)
    for data in [data1, data2]:
        remote = main.nameRemotes[data['name']]
        self.assertEqual(remote.name, data['name'])

@@ -1458,21 +1458,21 @@ class BasicTestCase(unittest.TestCase):
    self.service(main, other, duration=1.0)

    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.joined)
    self.assertEqual(len(other.transactions), 0)
-   remote = other.remotes.values()[0]
+   remote = other.remotes.itervalues().next()
    self.assertTrue(remote.joined)

    self.allow(other, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.allowed)
    self.assertEqual(len(other.transactions), 0)
-   remote = other.remotes.values()[0]
+   remote = other.remotes.itervalues().next()
    self.assertTrue(remote.allowed)

-   for remote in main.remotes.values():
+   for remote in main.remotes.itervalues():
        path = os.path.join(main.keep.remotedirpath,
                            "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
        self.assertTrue(os.path.exists(path))

@@ -1580,21 +1580,21 @@ class BasicTestCase(unittest.TestCase):

    self.join(other, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.joined)
    self.assertEqual(len(other.transactions), 0)
-   remote = other.remotes.values()[0]
+   remote = other.remotes.itervalues().next()
    self.assertTrue(remote.joined)

    self.allow(other, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.allowed)
    self.assertEqual(len(other.transactions), 0)
-   remote = other.remotes.values()[0]
+   remote = other.remotes.itervalues().next()
    self.assertTrue(remote.allowed)

-   for remote in main.remotes.values():
+   for remote in main.remotes.itervalues():
        path = os.path.join(main.keep.remotedirpath,
                            "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
        self.assertTrue(os.path.exists(path))

@@ -1702,21 +1702,21 @@ class BasicTestCase(unittest.TestCase):

    self.join(other, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.joined)
    self.assertEqual(len(other.transactions), 0)
-   remote = other.remotes.values()[0]
+   remote = other.remotes.itervalues().next()
    self.assertTrue(remote.joined)

    self.allow(other, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.allowed)
    self.assertEqual(len(other.transactions), 0)
-   remote = other.remotes.values()[0]
+   remote = other.remotes.itervalues().next()
    self.assertTrue(remote.allowed)

-   for remote in main.remotes.values():
+   for remote in main.remotes.itervalues():
        path = os.path.join(main.keep.remotedirpath,
                            "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
        self.assertTrue(os.path.exists(path))

@@ -1828,21 +1828,21 @@ class BasicTestCase(unittest.TestCase):
    self.service(main, other1, duration=1.0)

    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.joined)
    self.assertEqual(len(other1.transactions), 0)
-   remote = other1.remotes.values()[0]
+   remote = other1.remotes.itervalues().next()
    self.assertTrue(remote.joined)

    self.allow(other1, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.allowed)
    self.assertEqual(len(other1.transactions), 0)
-   remote = other1.remotes.values()[0]
+   remote = other1.remotes.itervalues().next()
    self.assertTrue(remote.allowed)

-   for remote in main.remotes.values():
+   for remote in main.remotes.itervalues():
        path = os.path.join(main.keep.remotedirpath,
                            "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
        self.assertTrue(os.path.exists(path))

@@ -1954,21 +1954,21 @@ class BasicTestCase(unittest.TestCase):
    self.join(other2, main)

    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.joined)
    self.assertEqual(len(other2.transactions), 0)
-   remote = other2.remotes.values()[0]
+   remote = other2.remotes.itervalues().next()
    self.assertTrue(remote.joined)

    self.allow(other2, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.allowed)
    self.assertEqual(len(other2.transactions), 0)
-   remote = other2.remotes.values()[0]
+   remote = other2.remotes.itervalues().next()
    self.assertTrue(remote.allowed)

-   for remote in main.remotes.values():
+   for remote in main.remotes.itervalues():
        path = os.path.join(main.keep.remotedirpath,
                            "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
        self.assertTrue(os.path.exists(path))

@@ -2079,21 +2079,21 @@ class BasicTestCase(unittest.TestCase):

    self.join(other1, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.joined)
    self.assertEqual(len(other1.transactions), 0)
-   remote = other1.remotes.values()[0]
+   remote = other1.remotes.itervalues().next()
    self.assertTrue(remote.joined)

    self.allow(other1, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.allowed)
    self.assertEqual(len(other1.transactions), 0)
-   remote = other1.remotes.values()[0]
+   remote = other1.remotes.itervalues().next()
    self.assertTrue(remote.allowed)

-   for remote in main.remotes.values():
+   for remote in main.remotes.itervalues():
        path = os.path.join(main.keep.remotedirpath,
                            "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
        self.assertTrue(os.path.exists(path))

@@ -2148,21 +2148,21 @@ class BasicTestCase(unittest.TestCase):
    self.join(other2, main)

    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.joined)
    self.assertEqual(len(other2.transactions), 0)
-   remote = other2.remotes.values()[0]
+   remote = other2.remotes.itervalues().next()
    self.assertTrue(remote.joined)

    self.allow(other2, main)
    self.assertEqual(len(main.transactions), 0)
-   remote = main.remotes.values()[0]
+   remote = main.remotes.itervalues().next()
    self.assertTrue(remote.allowed)
    self.assertEqual(len(other2.transactions), 0)
-   remote = other2.remotes.values()[0]
+   remote = other2.remotes.itervalues().next()
    self.assertTrue(remote.allowed)

-   for remote in main.remotes.values():
+   for remote in main.remotes.itervalues():
        path = os.path.join(main.keep.remotedirpath,
                            "{0}.{1}.{2}".format(main.keep.prefix, remote.name, main.keep.ext))
        self.assertTrue(os.path.exists(path))
|
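A note on the recurring swap above: on Python 2, `d.values()[0]` builds a full list of values just to read one element, while `d.itervalues().next()` pulls a single value from a lazy iterator. A standalone sketch of the difference:

# Python 2 sketch of the pattern applied throughout this commit.
remotes = {'alpha': 1, 'beta': 2, 'gamma': 3}

first_eager = remotes.values()[0]         # materializes every value first
first_lazy = remotes.itervalues().next()  # stops after one value

# Within a single process and with no intervening mutation, both
# expressions pick the same (arbitrary) element.
assert first_eager == first_lazy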
@ -13,13 +13,6 @@ class SaltException(Exception):
Base exception class; all Salt-specific exceptions should subclass this
'''

def pack(self):
'''
Pack this exception into a serializable dictionary that is safe for
transport via msgpack
'''
return dict(message=self.__unicode__(), args=self.args)


class SaltClientError(SaltException):
'''
@ -14,7 +14,7 @@ import requests

# Import salt libs
from salt.exceptions import (
CommandExecutionError, MinionError, SaltReqTimeoutError
CommandExecutionError, MinionError
)
import salt.client
import salt.crypt
@ -985,11 +985,8 @@ class RemoteClient(Client):
load['loc'] = 0
else:
load['loc'] = fn_.tell()
try:
channel = self._get_channel()
data = channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
data = channel.send(load)
if not data:
if init_retries:
init_retries -= 1
@ -1053,11 +1050,9 @@ class RemoteClient(Client):
load = {'saltenv': saltenv,
'prefix': prefix,
'cmd': '_file_list'}
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''

channel = self._get_channel()
return channel.send(load)

def file_list_emptydirs(self, saltenv='base', prefix='', env=None):
'''
@ -1076,11 +1071,8 @@ class RemoteClient(Client):
load = {'saltenv': saltenv,
'prefix': prefix,
'cmd': '_file_list_emptydirs'}
try:
channel = self._get_channel()
channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
channel.send(load)

def dir_list(self, saltenv='base', prefix='', env=None):
'''
@ -1099,11 +1091,8 @@ class RemoteClient(Client):
load = {'saltenv': saltenv,
'prefix': prefix,
'cmd': '_dir_list'}
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
return channel.send(load)

def symlink_list(self, saltenv='base', prefix='', env=None):
'''
@ -1112,11 +1101,8 @@ class RemoteClient(Client):
load = {'saltenv': saltenv,
'prefix': prefix,
'cmd': '_symlink_list'}
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
return channel.send(load)

def hash_file(self, path, saltenv='base', env=None):
'''
@ -1151,11 +1137,8 @@ class RemoteClient(Client):
load = {'path': path,
'saltenv': saltenv,
'cmd': '_file_hash'}
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
return channel.send(load)

def list_env(self, saltenv='base', env=None):
'''
@ -1173,33 +1156,24 @@ class RemoteClient(Client):

load = {'saltenv': saltenv,
'cmd': '_file_list'}
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
return channel.send(load)

def envs(self):
'''
Return a list of available environments
'''
load = {'cmd': '_file_envs'}
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
return channel.send(load)

def master_opts(self):
'''
Return the master opts data
'''
load = {'cmd': '_master_opts'}
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
return channel.send(load)

def ext_nodes(self):
'''
@ -1211,11 +1185,8 @@ class RemoteClient(Client):
'opts': self.opts}
if self.auth:
load['tok'] = self.auth.gen_token('salt')
try:
channel = self._get_channel()
return channel.send(load)
except SaltReqTimeoutError:
return ''
channel = self._get_channel()
return channel.send(load)


class FSClient(RemoteClient):
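These fileclient hunks delete the per-call `try/except SaltReqTimeoutError` guards that turned a timeout into an empty string, which callers could not tell apart from a genuinely empty result. A hedged sketch of the before/after shape (the channel object and its `send()` semantics are assumed from the diff above):

from salt.exceptions import SaltReqTimeoutError

def file_list_old(client, load):
    # Pre-change: a timeout was swallowed and '' returned.
    try:
        channel = client._get_channel()
        return channel.send(load)
    except SaltReqTimeoutError:
        return ''

def file_list_new(client, load):
    # Post-change: a timeout propagates (or is retried inside the
    # channel), so callers see a real failure instead of ''.
    channel = client._get_channel()
    return channel.send(load)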
@ -190,11 +190,6 @@ def diff_mtime_map(map1, map2):
'''
Is there a change to the mtime map? return a boolean
'''
# check if the file lists are different
if cmp(sorted(map1.keys()), sorted(map2.keys())) != 0:
#log.debug('diff_mtime_map: the keys are different')
return True

# check if the mtimes are the same
if cmp(sorted(map1), sorted(map2)) != 0:
#log.debug('diff_mtime_map: the maps are different')
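One subtlety in the surviving check: iterating a dict, as `sorted(map1)` does, yields only its keys, so the comparison fires on added or removed paths but not on an mtime that changed under an unchanged path; comparing items would catch both. A standalone illustration:

map1 = {'/srv/a.sls': 100, '/srv/b.sls': 200}
map2 = {'/srv/a.sls': 100, '/srv/b.sls': 999}  # same paths, new mtime

print(sorted(map1) == sorted(map2))                  # True: keys only
print(sorted(map1.items()) == sorted(map2.items()))  # False: mtime differs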
@ -52,6 +52,7 @@ import re
import shutil
import subprocess
from datetime import datetime
from salt._compat import text_type as _text_type

VALID_PROVIDERS = ('gitpython', 'pygit2', 'dulwich')
PER_REMOTE_PARAMS = ('base', 'mountpoint', 'root')
@ -614,14 +615,18 @@ def init():

per_remote_defaults = {}
for param in override_params:
per_remote_defaults[param] = __opts__['gitfs_{0}'.format(param)]
per_remote_defaults[param] = \
_text_type(__opts__['gitfs_{0}'.format(param)])

for remote in __opts__['gitfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
bad_per_remote_conf = False
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = salt.utils.repack_dictlist(remote[repo_url])
per_remote_conf = dict(
[(key, _text_type(val)) for key, val in
salt.utils.repack_dictlist(remote[repo_url]).items()]
)
if not per_remote_conf:
log.error(
'Invalid per-remote configuration for remote {0}. If no '
@ -1253,10 +1258,9 @@ def serve_file(load, fnd):
required_load_keys = set(['path', 'loc', 'saltenv'])
if not all(x in load for x in required_load_keys):
log.debug(
'Not all of the required key in load are present. Missing: {0}'.format(
', '.join(
required_load_keys.difference(load.keys())
)
'Not all of the required keys present in payload. '
'Missing: {0}'.format(
', '.join(required_load_keys.difference(load))
)
)
return ret
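The gitfs/hgfs/svnfs hunks coerce every per-remote value to `text_type` so that YAML scalars such as integers or booleans compare cleanly against string defaults. A hedged sketch of the coercion, with a stand-in for `repack_dictlist` (the real helper lives in `salt.utils`):

# Python 2 sketch; _text_type is unicode on PY2, as in salt._compat.
_text_type = unicode

def repack_dictlist(data):
    # Stand-in for salt.utils.repack_dictlist: flattens a list of
    # single-key dicts (the usual YAML shape) into one dict.
    out = {}
    for item in data:
        if isinstance(item, dict):
            out.update(item)
    return out

raw = [{'base': 'develop'}, {'root': 'states'}, {'mountpoint': 1}]
per_remote_conf = dict(
    [(key, _text_type(val)) for key, val in repack_dictlist(raw).items()]
)
print(per_remote_conf)  # every value is now text, e.g. u'1'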
@ -35,6 +35,7 @@ import logging
import os
import shutil
from datetime import datetime
from salt._compat import text_type as _text_type

VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed')
PER_REMOTE_PARAMS = ('base', 'branch_method', 'mountpoint', 'root')
@ -170,19 +171,23 @@ def init():

per_remote_defaults = {}
for param in PER_REMOTE_PARAMS:
per_remote_defaults[param] = __opts__['hgfs_{0}'.format(param)]
per_remote_defaults[param] = \
_text_type(__opts__['hgfs_{0}'.format(param)])

for remote in __opts__['hgfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_uri = next(iter(remote))
per_remote_conf = salt.utils.repack_dictlist(remote[repo_uri])
repo_url = next(iter(remote))
per_remote_conf = dict(
[(key, _text_type(val)) for key, val in
salt.utils.repack_dictlist(remote[repo_url]).items()]
)
if not per_remote_conf:
log.error(
'Invalid per-remote configuration for remote {0}. If no '
'per-remote parameters are being specified, there may be '
'a trailing colon after the URI, which should be removed. '
'Check the master configuration file.'.format(repo_uri)
'Check the master configuration file.'.format(repo_url)
)

branch_method = \
@ -192,7 +197,7 @@ def init():
log.error(
'Invalid branch_method {0!r} for remote {1}. Valid '
'branch methods are: {2}. This remote will be ignored.'
.format(branch_method, repo_uri,
.format(branch_method, repo_url,
', '.join(VALID_BRANCH_METHODS))
)
continue
@ -203,18 +208,18 @@ def init():
'Invalid configuration parameter {0!r} for remote {1}. '
'Valid parameters are: {2}. See the documentation for '
'further information.'.format(
param, repo_uri, ', '.join(PER_REMOTE_PARAMS)
param, repo_url, ', '.join(PER_REMOTE_PARAMS)
)
)
per_remote_conf.pop(param)
repo_conf.update(per_remote_conf)
else:
repo_uri = remote
repo_url = remote

if not isinstance(repo_uri, string_types):
if not isinstance(repo_url, string_types):
log.error(
'Invalid gitfs remote {0}. Remotes must be strings, you may '
'need to enclose the URI in quotes'.format(repo_uri)
'need to enclose the URI in quotes'.format(repo_url)
)
continue

@ -227,7 +232,7 @@ def init():
pass

hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_uri).hexdigest()
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_)
@ -243,7 +248,7 @@ def init():
'Cache path {0} (corresponding remote: {1}) exists but is not '
'a valid mercurial repository. You will need to manually '
'delete this directory on the master to continue to use this '
'hgfs remote.'.format(rp_, repo_uri)
'hgfs remote.'.format(rp_, repo_url)
)
continue

@ -253,11 +258,11 @@ def init():
hgconfpath = os.path.join(rp_, '.hg', 'hgrc')
with salt.utils.fopen(hgconfpath, 'w+') as hgconfig:
hgconfig.write('[paths]\n')
hgconfig.write('default = {0}\n'.format(repo_uri))
hgconfig.write('default = {0}\n'.format(repo_url))

repo_conf.update({
'repo': repo,
'uri': repo_uri,
'url': repo_url,
'hash': repo_hash,
'cachedir': rp_
})
@ -271,7 +276,7 @@ def init():
timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp))
for repo in repos:
fp_.write('{0} = {1}\n'.format(repo['hash'], repo['uri']))
fp_.write('{0} = {1}\n'.format(repo['hash'], repo['url']))
except OSError:
pass
else:
@ -323,7 +328,7 @@ def update():
except Exception as exc:
log.error(
'Exception {0} caught while updating hgfs remote {1}'
.format(exc, repo['uri']),
.format(exc, repo['url']),
exc_info_on_loglevel=logging.DEBUG
)
else:
@ -256,7 +256,7 @@ def file_list(load):
if not metadata or saltenv not in metadata:
return ret

for buckets in _find_files(metadata[saltenv]).values():
for buckets in _find_files(metadata[saltenv]).itervalues():
files = filter(lambda f: not fs.is_file_ignored(__opts__, f), buckets)
ret += _trim_env_off_path(files, saltenv)

@ -297,7 +297,7 @@ def dir_list(load):
return ret

# grab all the dirs from the buckets cache file
for dirs in _find_dirs(metadata[saltenv]).values():
for dirs in _find_dirs(metadata[saltenv]).itervalues():
# trim env and trailing slash
dirs = _trim_env_off_path(dirs, saltenv, trim_slash=True)
# remove empty string left by the base env dir in single bucket mode
@ -30,6 +30,7 @@ import logging
import os
import shutil
from datetime import datetime
from salt._compat import text_type as _text_type

PER_REMOTE_PARAMS = ('mountpoint', 'root', 'trunk', 'branches', 'tags')

@ -90,7 +91,7 @@ def _rev(repo):
log.error(
'Error retrieving revision ID for svnfs remote {0} '
'(cachedir: {1}): {2}'
.format(repo['uri'], repo['repo'], exc)
.format(repo['url'], repo['repo'], exc)
)
else:
return repo_info['revision'].number
@ -107,19 +108,23 @@ def init():

per_remote_defaults = {}
for param in PER_REMOTE_PARAMS:
per_remote_defaults[param] = __opts__['svnfs_{0}'.format(param)]
per_remote_defaults[param] = \
_text_type(__opts__['svnfs_{0}'.format(param)])

for remote in __opts__['svnfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_uri = next(iter(remote))
per_remote_conf = salt.utils.repack_dictlist(remote[repo_uri])
repo_url = next(iter(remote))
per_remote_conf = dict(
[(key, _text_type(val)) for key, val in
salt.utils.repack_dictlist(remote[repo_url]).items()]
)
if not per_remote_conf:
log.error(
'Invalid per-remote configuration for remote {0}. If no '
'per-remote parameters are being specified, there may be '
'a trailing colon after the URI, which should be removed. '
'Check the master configuration file.'.format(repo_uri)
'Check the master configuration file.'.format(repo_url)
)

for param in (x for x in per_remote_conf
@ -128,18 +133,18 @@ def init():
'Invalid configuration parameter {0!r} for remote {1}. '
'Valid parameters are: {2}. See the documentation for '
'further information.'.format(
param, repo_uri, ', '.join(PER_REMOTE_PARAMS)
param, repo_url, ', '.join(PER_REMOTE_PARAMS)
)
)
per_remote_conf.pop(param)
repo_conf.update(per_remote_conf)
else:
repo_uri = remote
repo_url = remote

if not isinstance(repo_uri, string_types):
if not isinstance(repo_url, string_types):
log.error(
'Invalid gitfs remote {0}. Remotes must be strings, you may '
'need to enclose the URI in quotes'.format(repo_uri)
'need to enclose the URI in quotes'.format(repo_url)
)
continue

@ -152,7 +157,7 @@ def init():
pass

hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_uri).hexdigest()
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_)
@ -160,13 +165,13 @@ def init():
if not os.listdir(rp_):
# Only attempt a new checkout if the directory is empty.
try:
CLIENT.checkout(repo_uri, rp_)
CLIENT.checkout(repo_url, rp_)
repos.append(rp_)
new_remote = True
except pysvn._pysvn.ClientError as exc:
log.error(
'Failed to initialize svnfs remote {0!r}: {1}'
.format(repo_uri, exc)
.format(repo_url, exc)
)
continue
else:
@ -179,13 +184,13 @@ def init():
'Cache path {0} (corresponding remote: {1}) exists but is '
'not a valid subversion checkout. You will need to '
'manually delete this directory on the master to continue '
'to use this svnfs remote.'.format(rp_, repo_uri)
'to use this svnfs remote.'.format(rp_, repo_url)
)
continue

repo_conf.update({
'repo': rp_,
'uri': repo_uri,
'url': repo_url,
'hash': repo_hash,
'cachedir': rp_
})
@ -200,7 +205,7 @@ def init():
for repo_conf in repos:
fp_.write(
'{0} = {1}\n'.format(
repo_conf['hash'], repo_conf['uri']
repo_conf['hash'], repo_conf['url']
)
)
except OSError:
@ -253,7 +258,7 @@ def update():
except pysvn._pysvn.ClientError as exc:
log.error(
'Error updating svnfs remote {0} (cachedir: {1}): {2}'
.format(repo['uri'], repo['cachedir'], exc)
.format(repo['url'], repo['cachedir'], exc)
)
try:
os.remove(lk_fn)
@ -328,7 +333,7 @@ def envs(ignore_cache=False):
log.error(
'svnfs trunk path {0!r} does not exist in repo {1}, no base '
'environment will be provided by this remote'
.format(repo['trunk'], repo['uri'])
.format(repo['trunk'], repo['url'])
)

branches = os.path.join(repo['repo'], repo['branches'])
@ -337,7 +342,7 @@ def envs(ignore_cache=False):
else:
log.error(
'svnfs branches path {0!r} does not exist in repo {1}'
.format(repo['branches'], repo['uri'])
.format(repo['branches'], repo['url'])
)

tags = os.path.join(repo['repo'], repo['tags'])
@ -346,7 +351,7 @@ def envs(ignore_cache=False):
else:
log.error(
'svnfs tags path {0!r} does not exist in repo {1}'
.format(repo['tags'], repo['uri'])
.format(repo['tags'], repo['url'])
)
return [x for x in sorted(ret) if _env_is_exposed(x)]

@ -187,6 +187,15 @@ def returners(opts, functions, whitelist=None):
)


def utils(opts, whitelist=None):
'''
Returns the utility modules
'''
load = _create_loader(opts, 'utils', 'utils',
ext_type_dirs='utils_dirs')
return LazyLoader(load, whitelist=whitelist)


def pillars(opts, functions):
'''
Returns the pillars modules
@ -716,9 +725,9 @@ class Loader(object):
mod.__salt__ = functions
try:
context = sys.modules[
functions[functions.keys()[0]].__module__
functions[functions.iterkeys().next()].__module__
].__context__
except (AttributeError, IndexError):
except (AttributeError, StopIteration):
context = {}
mod.__context__ = context
return funcs
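The new `utils()` loader exposes custom utility modules (placed under a directory listed in the `utils_dirs` option) to other Salt code; note the paired exception change, since an empty iterator now raises `StopIteration` rather than `IndexError`. A hedged usage sketch, with the module and function names being hypothetical:

# Illustration of the new loader entry point; the call shape and the
# 'utils_dirs' key come from the hunk above.
import salt.config
import salt.loader

opts = salt.config.minion_config('/etc/salt/minion')
opts['utils_dirs'] = ['/srv/salt/_utils']  # where my_util.py would live

utils = salt.loader.utils(opts)
# The loader behaves like a mapping of '<module>.<function>' entries:
# result = utils['my_util.helper']()   # hypothetical module/function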
@ -677,6 +677,11 @@ def __remove_temp_logging_handler():
logging.captureWarnings(True)


# Let's setup a global exception hook handler which will log all exceptions
# Store a reference to the original handler
__GLOBAL_EXCEPTION_HANDLER = sys.excepthook


def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
'''
This function will log all python exceptions.
@ -693,7 +698,7 @@ def __global_logging_exception_handler(exc_type, exc_value, exc_traceback):
)
)
# Call the original sys.excepthook
sys.__excepthook__(exc_type, exc_value, exc_traceback)
__GLOBAL_EXCEPTION_HANDLER(exc_type, exc_value, exc_traceback)


# Set our own exception handler as the one to use
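The change above chains to whatever hook was installed when the module loaded, instead of hard-coding `sys.__excepthook__`; that keeps third-party hooks (debuggers, crash reporters) working. A minimal standalone sketch of the same pattern:

import sys
import logging

logging.basicConfig(level=logging.ERROR)
log = logging.getLogger('hook-demo')

# Capture whichever hook is active right now, then delegate to it.
_previous_hook = sys.excepthook

def logging_excepthook(exc_type, exc_value, exc_traceback):
    log.error('Uncaught exception',
              exc_info=(exc_type, exc_value, exc_traceback))
    _previous_hook(exc_type, exc_value, exc_traceback)  # preserve prior behavior

sys.excepthook = logging_excepthook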
@ -89,6 +89,42 @@ class SMaster(object):
return salt.daemons.masterapi.access_keys(self.opts)


class Scheduler(multiprocessing.Process):
'''
The master scheduler process.

This runs in its own process so that it can have a fully
independent loop from the Maintenance process.
'''
def __init__(self, opts):
super(Scheduler, self).__init__()
self.opts = opts
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
salt.loader.runner(self.opts),
returners=salt.loader.returners(self.opts, {}))

def run(self):
salt.utils.appendproctitle('Scheduler')
while True:
self.handle_schedule()
try:
time.sleep(self.schedule.loop_interval)
except KeyboardInterrupt:
break

def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)


class Maintenance(multiprocessing.Process):
'''
A generalized maintenance process which performs maintenance
@ -104,14 +140,7 @@ class Maintenance(multiprocessing.Process):
self.opts = opts
# Init fileserver manager
self.fileserver = salt.fileserver.Fileserver(self.opts)
# Load Runners
self.runners = salt.loader.runner(self.opts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
self.runners,
returners=self.returners)
# Matcher
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
@ -147,7 +176,6 @@ class Maintenance(multiprocessing.Process):
salt.daemons.masterapi.clean_expired_tokens(self.opts)
self.handle_search(now, last)
self.handle_pillargit()
self.handle_schedule()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.daemons.masterapi.fileserver_update(self.fileserver)
@ -194,21 +222,6 @@ class Maintenance(multiprocessing.Process):
log.error('Exception {0} occurred in file server update '
'for git_pillar module.'.format(exc))

def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)

def handle_presence(self, old_present):
'''
Fire presence events if enabled
@ -302,17 +315,6 @@ class Master(SMaster):
)
)

def __handle_error_react(self, event):
log.error('Received minion error from [{minion}]: {data}'.format(minion=event['id'], data=event['data']['exception']))

def __register_reactions(self):
'''
Register any reactions the master will need
'''
log.info('Registering master reactions')
log.info('Registering master error handling')
self.opts['reactor'].append({'_salt_error': self.__handle_error_react})

def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
@ -320,7 +322,6 @@ class Master(SMaster):
'''
errors = []
fileserver = salt.fileserver.Fileserver(self.opts)
self.__register_reactions()
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
@ -351,6 +352,8 @@ class Master(SMaster):
process_manager = salt.utils.process.ProcessManager()
log.info('Creating master maintenance process')
process_manager.add_process(Maintenance, args=(self.opts,))
log.info('Creating master scheduler process')
process_manager.add_process(Scheduler, args=(self.opts,))
log.info('Creating master publisher process')
process_manager.add_process(Publisher, args=(self.opts,))
log.info('Creating master event publisher process')
@ -1138,7 +1141,7 @@ class AESFuncs(object):
return False
load['grains']['id'] = load['id']
mods = set()
for func in self.mminion.functions.values():
for func in self.mminion.functions.itervalues():
mods.add(func.__module__)
for mod in mods:
sys.modules[mod].__grains__ = load['grains']
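The new `Scheduler` class above moves schedule evaluation out of `Maintenance` into its own process, so a slow maintenance pass can no longer delay scheduled jobs. A stripped-down standalone sketch of the same process-per-loop pattern, with the Schedule object faked:

import time
import multiprocessing

class FakeSchedule(object):
    '''Stand-in for salt.utils.schedule.Schedule in this sketch.'''
    loop_interval = 1

    def eval(self):
        print('evaluating scheduled jobs')

class SchedulerProcess(multiprocessing.Process):
    def __init__(self):
        super(SchedulerProcess, self).__init__()
        self.schedule = FakeSchedule()

    def run(self):
        # Independent loop: nothing else shares this process's cadence.
        while True:
            try:
                self.schedule.eval()
                time.sleep(self.schedule.loop_interval)
            except KeyboardInterrupt:
                break

if __name__ == '__main__':
    SchedulerProcess().start()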
@ -66,7 +66,6 @@ import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.utils.error
import salt.exitcodes

from salt.defaults import DEFAULT_TARGET_DELIM
@ -190,7 +189,7 @@ def load_args_and_kwargs(func, args, data=None):
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or string_kwarg.keys()[0] in argspec.args:
if argspec.keywords or string_kwarg.iterkeys().next() in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
@ -513,7 +512,12 @@ class MultiMinion(MinionBase):

while True:
package = None

for minion in minions.itervalues():
if isinstance(minion, dict):
minion = minion['minion']
if not hasattr(minion, 'schedule'):
continue
loop_interval = self.process_schedule(minion, loop_interval)
socks = dict(self.poller.poll(1))
if socks.get(self.epull_sock) == zmq.POLLIN:
try:
@ -618,7 +622,7 @@ class Minion(MinionBase):

# add default scheduling jobs to the minions scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to schedular')
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
@ -1091,7 +1095,6 @@ class Minion(MinionBase):
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
@ -1625,10 +1628,6 @@ class Minion(MinionBase):

self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)

# Main Minion Tune In
def tune_in(self):
@ -1807,7 +1806,7 @@ class Minion(MinionBase):
self._running = False
if getattr(self, 'poller', None) is not None:
if isinstance(self.poller.sockets, dict):
for socket in self.poller.sockets.keys():
for socket in self.poller.sockets:
if socket.closed is False:
socket.close()
self.poller.unregister(socket)
@ -436,7 +436,7 @@ def config(name, config, edit=True):
'''

for entry in config:
key = entry.keys()[0]
key = entry.iterkeys().next()
configs = _parse_config(entry[key], key)
if edit:
with salt.utils.fopen(name, 'w') as configfile:
@ -232,7 +232,7 @@ def latest_version(*names, **kwargs):

virtpkgs = _get_virtual()
all_virt = set()
for provides in virtpkgs.values():
for provides in virtpkgs.itervalues():
all_virt.update(provides)

for name in names:
@ -457,7 +457,7 @@ def install(name=None,
refreshdb = False
for pkg in pkgs:
if isinstance(pkg, dict):
_name = pkg.keys()[0]
_name = pkg.iterkeys().next()
_latest_version = latest_version(_name, refresh=False, show_installed=True)
_version = pkg[_name]
# If the versions don't match, refresh is True, otherwise no need to refresh
@ -1162,8 +1162,7 @@ def get_repo(repo, **kwargs):
ppa_name, dist)
else:
if HAS_SOFTWAREPROPERTIES:
repo = softwareproperties.ppa.expand_ppa_line(
repo,
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(
__grains__['lsb_distrib_codename'])[0]
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
@ -1186,7 +1185,7 @@ def get_repo(repo, **kwargs):
.format(repo)
)

for source in repos.values():
for source in repos.itervalues():
for sub in source:
if (sub['type'] == repo_type and
# strip trailing '/' from repo_uri, it's valid in definition
@ -1235,7 +1234,7 @@ def del_repo(repo, **kwargs):
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]

sources = sourceslist.SourcesList()
repos = [s for s in sources.list if not s.invalid]
@ -1626,8 +1625,7 @@ def expand_repo_def(repokwargs):
dist)
else:
if HAS_SOFTWAREPROPERTIES:
repo = softwareproperties.ppa.expand_ppa_line(
repo, dist)[0]
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)

@ -1894,5 +1892,5 @@ def owner(*paths):
if 'no path found' in ret[path].lower():
ret[path] = ''
if len(ret) == 1:
return ret.values()[0]
return ret.itervalues().next()
return ret
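The aptpkg hunks track an API change in python-software-properties: the module-level `expand_ppa_line()` helper gave way to a `PPAShortcutHandler` class. A hedged compatibility sketch that tolerates either API (exact signatures vary between Ubuntu releases, so treat the shapes below as assumptions taken from the diff):

import softwareproperties.ppa

def expand_ppa(repo, codename):
    # Newer python-software-properties expose PPAShortcutHandler;
    # older releases only have expand_ppa_line().
    if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
        return softwareproperties.ppa.PPAShortcutHandler(repo).expand(codename)[0]
    return softwareproperties.ppa.expand_ppa_line(repo, codename)[0]

# Usage (hypothetical):
# line = expand_ppa('ppa:saltstack/salt', 'trusty')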
@ -1,13 +1,14 @@
# -*- coding: utf-8 -*-
'''
A module to wrap archive calls
A module to wrap (non-Windows) archive calls

.. versionadded:: 2014.1.0
'''

# Import salt libs
import salt._compat
from salt.utils import which as _which, which_bin as _which_bin
from salt.utils import \
which as _which, which_bin as _which_bin, is_windows as _is_windows
import salt.utils.decorators as decorators

# TODO: Check that the passed arguments are correct
@ -19,6 +20,8 @@ __func_alias__ = {


def __virtual__():
if _is_windows():
return False
commands = ('tar', 'gzip', 'gunzip', 'zip', 'unzip', 'rar', 'unrar')
# If none of the above commands are in $PATH this module is a no-go
if not any(_which(cmd) for cmd in commands):
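`__virtual__()` is how a Salt execution module opts in or out at load time: returning `False` hides the module on unsuitable platforms. A minimal standalone sketch of the convention used above, with a stdlib stand-in for `salt.utils.which`:

import platform
from distutils.spawn import find_executable  # stand-in for salt.utils.which

__virtualname__ = 'archive'

def __virtual__():
    # Refuse to load on Windows, mirroring the hunk above.
    if platform.system() == 'Windows':
        return False
    # Require at least one of the wrapped CLI tools to be present.
    commands = ('tar', 'gzip', 'gunzip', 'zip', 'unzip', 'rar', 'unrar')
    if not any(find_executable(cmd) for cmd in commands):
        return False
    return __virtualname__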
@ -1227,18 +1227,16 @@ def describe(vpc_id=None, region=None, key=None, keyid=None, profile=None):
state=None,
tags=None,
dhcp_options_id=None,
instance_tenancy=None
)
instance_tenancy=None)

if not conn:
return False

if not vpc_id:
raise SaltInvocationError(
'VPC ID needs to be specified.')
raise SaltInvocationError('VPC ID needs to be specified.')

try:
filter_parameters = {'filters': {'vpc-id': vpc_id}}
filter_parameters = {'vpc_ids': vpc_id}

vpcs = conn.get_all_vpcs(**filter_parameters)

@ -1174,20 +1174,20 @@ def _parse_settings_eth(opts, iface_type, enabled, iface):

iface_data['inet']['ethtool'] = ethtool
# return a list of sorted keys to ensure consistent order
iface_data['inet']['ethtool_keys'] = sorted(ethtool.keys())
iface_data['inet']['ethtool_keys'] = sorted(ethtool)

if iface_type == 'bridge':
bridging = _parse_bridge_opts(opts, iface)
if bridging:
iface_data['inet']['bridging'] = bridging
iface_data['inet']['bridging_keys'] = sorted(bridging.keys())
iface_data['inet']['bridging_keys'] = sorted(bridging)

elif iface_type == 'bond':
bonding = _parse_settings_bond(opts, iface)
if bonding:
iface_data['inet']['bonding'] = bonding
iface_data['inet']['bonding']['slaves'] = opts['slaves']
iface_data['inet']['bonding_keys'] = sorted(bonding.keys())
iface_data['inet']['bonding_keys'] = sorted(bonding)

elif iface_type == 'slave':
adapters[iface]['master'] = opts['master']
@ -91,7 +91,7 @@ def set_config(config_file='/etc/dnsmasq.conf', follow=True, **kwargs):
if filename.startswith('#') and filename.endswith('#'):
continue
includes.append('{0}/{1}'.format(dnsopts['conf-dir'], filename))
for key in kwargs.keys():
for key in kwargs:
if key in dnsopts:
if isinstance(dnsopts[key], str):
for config in includes:
@ -155,7 +155,7 @@ def parse_zone(zonefile=None, zone=None):
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict.keys():
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
@ -179,7 +179,7 @@ def parse_zone(zonefile=None, zone=None):
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict.keys():
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
else:
@ -1751,7 +1751,7 @@ def _run_wrapper(status, container, func, cmd, *args, **kwargs):
_invalid(status, id_=container, comment='Container is not running')
return status
full_cmd = ('nsenter --target {pid} --mount --uts --ipc --net --pid'
' {cmd}'.format(pid=container_pid, cmd=cmd))
' -- {cmd}'.format(pid=container_pid, cmd=cmd))
else:
raise NotImplementedError(
'Unknown docker ExecutionDriver {0!r}. Or didn\'t find command'
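The small-looking fix above adds the `--` separator so nsenter stops parsing options and treats everything after it as the command to run; without it, a command that happens to start with a dash could be swallowed as an nsenter flag. A sketch of composing such a command line:

# Sketch: compose an nsenter invocation for a containerized command.
# '--' ends option parsing, so cmd may safely begin with '-'.
container_pid = 12345               # hypothetical PID inside the container
cmd = '-sh -c "echo hello"'         # contrived: starts with a dash

full_cmd = ('nsenter --target {pid} --mount --uts --ipc --net --pid'
            ' -- {cmd}'.format(pid=container_pid, cmd=cmd))
print(full_cmd)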
@ -113,7 +113,7 @@ def file_list(*packages):
'description': ' '.join(comps[3:])}
if 'No packages found' in line:
errors.append(line)
for pkg in pkgs.keys():
for pkg in pkgs:
files = []
cmd = 'dpkg -L {0}'.format(pkg)
for line in __salt__['cmd.run'](cmd).splitlines():
@ -155,7 +155,7 @@ def file_dict(*packages):
'description': ' '.join(comps[3:])}
if 'No packages found' in line:
errors.append(line)
for pkg in pkgs.keys():
for pkg in pkgs:
files = []
cmd = 'dpkg -L {0}'.format(pkg)
for line in __salt__['cmd.run'](cmd).splitlines():
84
salt/modules/drbd.py
Normal file
@ -0,0 +1,84 @@
# -*- coding: utf-8 -*-
'''
DRBD administration module
'''

import logging

log = logging.getLogger(__name__)


def overview():
'''
Show status of the DRBD devices

CLI Example:

.. code-block:: bash

salt '*' drbd.overview
'''
cmd = 'drbd-overview'
for line in __salt__['cmd.run'](cmd).splitlines():
ret = {}
fields = line.strip().split()
minnum = fields[0].split(':')[0]
device = fields[0].split(':')[1]
connstate = fields[1]
role = fields[2].split('/')
localrole = role[0]
partnerrole = role[1]
diskstate = fields[3].split('/')
localdiskstate = diskstate[0]
partnerdiskstate = diskstate[1]
if localdiskstate == "UpToDate":
if partnerdiskstate == "UpToDate":
if fields[4]:
mountpoint = fields[4]
fs_mounted = fields[5]
totalsize = fields[6]
usedsize = fields[7]
remainsize = fields[8]
perc = fields[9]
ret = {
'minor number': minnum,
'device': device,
'connection state': connstate,
'local role': localrole,
'partner role': partnerrole,
'local disk state': localdiskstate,
'partner disk state': partnerdiskstate,
'mountpoint': mountpoint,
'fs': fs_mounted,
'total size': totalsize,
'used': usedsize,
'remains': remainsize,
'percent': perc,
}
else:
ret = {
'minor number': minnum,
'device': device,
'connection state': connstate,
'local role': localrole,
'partner role': partnerrole,
'local disk state': localdiskstate,
'partner disk state': partnerdiskstate,
}
else:
syncbar = fields[4]
synced = fields[6]
syncedbytes = fields[7]
sync = synced+syncedbytes
ret = {
'minor number': minnum,
'device': device,
'connection state': connstate,
'local role': localrole,
'partner role': partnerrole,
'local disk state': localdiskstate,
'partner disk state': partnerdiskstate,
'synchronisation: ': syncbar,
'synched': sync,
}
return ret
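As committed, `overview()` rebinds `ret` on every line of `drbd-overview` output and returns after the loop, so only the last resource parsed is reported. A hedged sketch of an accumulating variant keyed by minor number (same field-position assumptions as the module above):

def overview_all(output_lines):
    '''Collect one status dict per DRBD resource instead of the last one.'''
    devices = {}
    for line in output_lines:
        fields = line.strip().split()
        if not fields:
            continue
        minnum, device = fields[0].split(':')[0:2]
        devices[minnum] = {
            'device': device,
            'connection state': fields[1],
            'local role': fields[2].split('/')[0],
            'partner role': fields[2].split('/')[1],
        }
    return devices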
@ -135,7 +135,7 @@ def setenv(environ, false_unsets=False, clear_all=False, update_minion=False):
return False
if clear_all is True:
# Unset any keys not defined in 'environ' dict supplied by user
to_unset = [key for key in os.environ.keys() if key not in environ]
to_unset = [key for key in os.environ if key not in environ]
for key in to_unset:
ret[key] = setval(key, False, false_unsets)
for key, val in environ.items():
@ -548,7 +548,7 @@ def check_hash(path, file_hash):
return get_hash(path, hash_form) == hash_value


def find(path, **kwargs):
def find(path, *args, **kwargs):
'''
Approximate the Unix ``find(1)`` command and return a list of paths that
meet the specified criteria.
@ -662,6 +662,11 @@ def find(path, **kwargs):
salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
'''
if 'delete' in args:
kwargs['delete'] = 'f'
elif 'print' in args:
kwargs['print'] = 'path'

try:
finder = salt.utils.find.Finder(kwargs)
except ValueError as ex:
@ -2148,7 +2153,7 @@ def access(path, mode):

if mode in modes:
return os.access(path, modes[mode])
elif mode in modes.values():
elif mode in modes.itervalues():
return os.access(path, mode)
else:
raise SaltInvocationError('Invalid mode specified.')
@ -4106,7 +4111,7 @@ def open_files(by_pid=False):

# Then we look at the open files for each PID
files = {}
for pid in pids.keys():
for pid in pids:
ppath = '/proc/{0}'.format(pid)
try:
tids = os.listdir('{0}/task'.format(ppath))
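The `find(path, *args, **kwargs)` change lets the CLI pass bare words such as `delete` or `print` (which have no `key=value` form) and maps them onto the keyword options the `Finder` expects. A sketch of that normalization on its own:

def normalize_find_args(args, kwargs):
    # Bare CLI words become the kwargs salt.utils.find.Finder expects:
    # 'delete' -> delete matched files, 'print' -> print the matched path.
    kwargs = dict(kwargs)
    if 'delete' in args:
        kwargs['delete'] = 'f'
    elif 'print' in args:
        kwargs['print'] = 'path'
    return kwargs

print(normalize_find_args(('delete',), {'name': '*.log'}))
# {'name': '*.log', 'delete': 'f'}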
@ -475,7 +475,7 @@ def file_list(*packages):
'''
ret = file_dict(*packages)
files = []
for pkg_files in ret['files'].values():
for pkg_files in ret['files'].itervalues():
files.extend(pkg_files)
ret['files'] = files
return ret
@ -249,7 +249,7 @@ def avail_platforms():
salt myminion genesis.avail_platforms
'''
ret = {}
for platform in CMD_MAP.keys():
for platform in CMD_MAP:
ret[platform] = True
for cmd in CMD_MAP[platform]:
if not salt.utils.which(cmd):
@ -27,7 +27,7 @@ def mount_image(location):
mnt = __salt__['qemu_nbd.init'](location)
if not mnt:
return ''
first = mnt.keys()[0]
first = mnt.iterkeys().next()
__context__['img.mnt_{0}'.format(first)] = mnt
return first
return ''
@ -95,4 +95,4 @@ def bootstrap(location, size, fmt):
__salt__['partition.probe'](nbd)
__salt__['partition.mkfs']('{0}p1'.format(nbd), 'ext4')
mnt = __salt__['qemu_nbd.mount'](nbd)
#return __salt__['pkg.bootstrap'](nbd, mnt.keys()[0])
#return __salt__['pkg.bootstrap'](nbd, mnt.iterkeys().next())
@ -56,7 +56,7 @@ def running_service_owners(
for service in execs:
if path == execs[service]:
pkg = __salt__['pkg.owner'](path)
ret[service] = pkg.values()[0]
ret[service] = pkg.itervalues().next()

return ret

@ -94,7 +94,7 @@ def enabled_service_owners():
continue
start_cmd = data['ExecStart']['path']
pkg = __salt__['pkg.owner'](start_cmd)
ret[service] = pkg.values()[0]
ret[service] = pkg.itervalues().next()

return ret

@ -131,7 +131,7 @@ def service_highstate(requires=True):
if requires:
exists = False
for item in ret[service]['service']:
if isinstance(item, dict) and item.keys()[0] == 'require':
if isinstance(item, dict) and item.iterkeys().next() == 'require':
exists = True
if not exists:
ret[service]['service'].append(
@ -878,7 +878,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[user]['id']
else:
user = user_get(user_id, profile=profile,
**connection_args).keys()[0]['name']
**connection_args).iterkeys().next()['name']
if not user_id:
return {'Error': 'Unable to resolve user id'}

@ -887,7 +887,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[tenant]['id']
else:
tenant = tenant_get(tenant_id, profile=profile,
**connection_args).keys()[0]['name']
**connection_args).iterkeys().next()['name']
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}

@ -896,7 +896,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[role]['id']
else:
role = role_get(role_id, profile=profile,
**connection_args).keys()[0]['name']
**connection_args).iterkeys().next()['name']
if not role_id:
return {'Error': 'Unable to resolve role id'}

@ -927,7 +927,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[user]['id']
else:
user = user_get(user_id, profile=profile,
**connection_args).keys()[0]['name']
**connection_args).iterkeys().next()['name']
if not user_id:
return {'Error': 'Unable to resolve user id'}

@ -936,7 +936,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
**connection_args)[tenant]['id']
else:
tenant = tenant_get(tenant_id, profile=profile,
**connection_args).keys()[0]['name']
**connection_args).iterkeys().next()['name']
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}

@ -944,7 +944,7 @@ role_id=ce377245c4ec9b70e1c639c89e8cead4
role_id = role_get(name=role, profile=profile,
**connection_args)[role]['id']
else:
role = role_get(role_id).keys()[0]['name']
role = role_get(role_id).iterkeys().next()['name']
if not role_id:
return {'Error': 'Unable to resolve role id'}

@ -85,7 +85,7 @@ def _service_by_name(name):
# Match on label
return services[name]

for service in services.values():
for service in services.itervalues():
if service['file_path'].lower() == name:
# Match on full path
return service
@ -5,6 +5,8 @@ Support for Linux File Access Control Lists

# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError

# Define the module's virtual name
__virtualname__ = 'acl'
@ -35,7 +36,12 @@ def version():
return ret[1].strip()


def getfacl(*args):
def _raise_on_no_files(*args):
if len(args) == 0:
raise CommandExecutionError('You need to specify at least one file or directory to work with!')


def getfacl(*args, **kwargs):
'''
Return (extremely verbose) map of FACLs on specified file(s)

@ -45,9 +51,16 @@ def getfacl(*args):

salt '*' acl.getfacl /tmp/house/kitchen
salt '*' acl.getfacl /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.getfacl /tmp/house/kitchen /tmp/house/livingroom recursive=True
'''
recursive = kwargs.pop('recursive', False)

_raise_on_no_files(*args)

ret = {}
cmd = 'getfacl -p'
if recursive:
cmd += ' -R'
for dentry in args:
cmd += ' {0}'.format(dentry)
out = __salt__['cmd.run'](cmd).splitlines()
@ -81,24 +94,24 @@ def getfacl(*args):
del vals['type']
for entity in ('user', 'group'):
plural = entity + 's'
if entity in vals.keys():
if entity in vals:
usergroup = vals[entity]
del vals[entity]
if acl_type == 'acl':
ret[dentry][plural].append({usergroup: vals})
elif acl_type == 'default':
if 'defaults' not in ret[dentry].keys():
if 'defaults' not in ret[dentry]:
ret[dentry]['defaults'] = {}
if plural not in ret[dentry]['defaults'].keys():
if plural not in ret[dentry]['defaults']:
ret[dentry]['defaults'][plural] = []
ret[dentry]['defaults'][plural].append({usergroup: vals})
for entity in ('other', 'mask'):
if entity in vals.keys():
if entity in vals:
del vals[entity]
if acl_type == 'acl':
ret[dentry][entity] = vals
elif acl_type == 'default':
if 'defaults' not in ret[dentry].keys():
if 'defaults' not in ret[dentry]:
ret[dentry]['defaults'] = {}
ret[dentry]['defaults'][entity] = vals
return ret
@ -147,7 +160,7 @@ def _parse_acl(acl, user, group):
return vals


def wipefacls(*args):
def wipefacls(*args, **kwargs):
'''
Remove all FACLs from the specified file(s)

@ -157,15 +170,21 @@ def wipefacls(*args):

salt '*' acl.wipefacls /tmp/house/kitchen
salt '*' acl.wipefacls /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.wipefacls /tmp/house/kitchen /tmp/house/livingroom recursive=True
'''
recursive = kwargs.pop('recursive', False)

_raise_on_no_files(*args)
cmd = 'setfacl -b'
if recursive:
cmd += ' -R'
for dentry in args:
cmd += ' {0}'.format(dentry)
__salt__['cmd.run'](cmd)
return True


def modfacl(acl_type, acl_name, perms, *args):
def modfacl(acl_type, acl_name, perms, *args, **kwargs):
'''
Add or modify a FACL for the specified file(s)

@ -177,8 +196,15 @@ def modfacl(acl_type, acl_name, perms, *args):
salt '*' acl.modfacl default:group mygroup rx /tmp/house/kitchen
salt '*' acl.modfacl d:u myuser 7 /tmp/house/kitchen
salt '*' acl.modfacl g mygroup 0 /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.modfacl user myuser rwx /tmp/house/kitchen recursive=True
'''
recursive = kwargs.pop('recursive', False)

_raise_on_no_files(*args)

cmd = 'setfacl -m'
if recursive:
cmd += ' -R'

prefix = ''
if acl_type.startswith('d'):
@ -197,7 +223,7 @@ def modfacl(acl_type, acl_name, perms, *args):
return True


def delfacl(acl_type, acl_name, *args):
def delfacl(acl_type, acl_name, *args, **kwargs):
'''
Remove specific FACL from the specified file(s)

@ -209,8 +235,15 @@ def delfacl(acl_type, acl_name, *args):
salt '*' acl.delfacl default:group mygroup /tmp/house/kitchen
salt '*' acl.delfacl d:u myuser /tmp/house/kitchen
salt '*' acl.delfacl g myuser /tmp/house/kitchen /tmp/house/livingroom
salt '*' acl.delfacl user myuser /tmp/house/kitchen recursive=True
'''
recursive = kwargs.pop('recursive', False)

_raise_on_no_files(*args)

cmd = 'setfacl -x'
if recursive:
cmd += ' -R'

prefix = ''
if acl_type.startswith('d'):
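The acl functions all gained the same `recursive=True` handling: pop the flag from `**kwargs` so it never leaks into the positional file list, then splice `-R` into the command. The shared shape, as a standalone sketch:

def build_setfacl_cmd(action_flag, *paths, **kwargs):
    # Pop optional flags first so only file paths remain in *paths.
    recursive = kwargs.pop('recursive', False)
    if not paths:
        raise ValueError('You need to specify at least one file or directory to work with!')
    cmd = 'setfacl {0}'.format(action_flag)
    if recursive:
        cmd += ' -R'
    for dentry in paths:
        cmd += ' {0}'.format(dentry)
    return cmd

print(build_setfacl_cmd('-b', '/tmp/house/kitchen', recursive=True))
# setfacl -b -R /tmp/house/kitchen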
@ -200,7 +200,7 @@ def pvcreate(devices, **kwargs):
'pvmetadatacopies', 'metadatacopies', 'metadataignore',
'restorefile', 'norestorefile', 'labelsector',
'setphysicalvolumesize')
for var in kwargs.keys():
for var in kwargs:
if kwargs[var] and var in valid:
cmd.append('--{0}'.format(var))
cmd.append(kwargs[var])
@ -246,7 +246,7 @@ def vgcreate(vgname, devices, **kwargs):
cmd.append(device)
valid = ('clustered', 'maxlogicalvolumes', 'maxphysicalvolumes',
'vgmetadatacopies', 'metadatacopies', 'physicalextentsize')
for var in kwargs.keys():
for var in kwargs:
if kwargs[var] and var in valid:
cmd.append('--{0}'.format(var))
cmd.append(kwargs[var])
@ -141,15 +141,16 @@ def assign(name, value):
cmd = 'sysctl -w {0}="{1}"'.format(name, value)
data = __salt__['cmd.run_all'](cmd)
out = data['stdout']
err = data['stderr']

# Example:
# # sysctl -w net.ipv4.tcp_rmem="4096 87380 16777216"
# net.ipv4.tcp_rmem = 4096 87380 16777216
regex = re.compile(r'^{0}\s+=\s+{1}$'.format(re.escape(name), re.escape(value)))

if not regex.match(out):
if data['retcode'] != 0 and data['stderr']:
error = data['stderr']
if not regex.match(out) or 'Invalid argument' in str(err):
if data['retcode'] != 0 and err:
error = err
else:
error = out
raise CommandExecutionError('sysctl -w failed: {0}'.format(error))
@ -466,7 +466,7 @@ def _get_network_conf(conf_tuples=None, **kwargs):
new[iface]['lxc.network.hwaddr'] = omac

ret = []
for v in new.values():
for v in new.itervalues():
for row in v:
ret.append({row: v[row]})
return ret
@ -2332,7 +2332,7 @@ def write_conf(conf_file, conf):
if isinstance(line, str):
fp_.write(line)
elif isinstance(line, dict):
key = line.keys()[0]
key = line.iterkeys().next()
out_line = None
if isinstance(line[key], str):
out_line = ' = '.join((key, line[key]))
@ -2378,7 +2378,7 @@ def edit_conf(conf_file, out_format='simple', **kwargs):
data.append(line)
continue
else:
key = line.keys()[0]
key = line.iterkeys().next()
if key not in kwargs:
data.append(line)
continue
@ -315,7 +315,7 @@ def filter_by(lookup, expr_form='compound', minion_id=None):
expr_funcs = dict(inspect.getmembers(sys.modules[__name__],
predicate=inspect.isfunction))

for key in lookup.keys():
for key in lookup:
if minion_id and expr_funcs[expr_form](key, minion_id):
return lookup[key]
elif expr_funcs[expr_form](key, minion_id):
@ -6,6 +6,8 @@ Module for Management of Memcached Keys
.. versionadded:: 2014.1.0
'''

# TODO: use salt.utils.memcache

# Import python libs
import logging

@ -571,7 +571,7 @@ def query(database, query, **connection_args):
# into Python objects. It leaves them as strings.
orig_conv = MySQLdb.converters.conversions
conv_iter = iter(orig_conv)
conv = dict(zip(conv_iter, [str] * len(orig_conv.keys())))
conv = dict(zip(conv_iter, [str] * len(orig_conv)))
# some converters are lists, do not break these
conv[FIELD_TYPE.BLOB] = [
(FLAG.BINARY, str),
@ -1461,7 +1461,7 @@ def __ssl_option_sanitize(ssl_option):

# Like most other "salt dsl" YAML structures, ssl_option is a list of single-element dicts
for opt in ssl_option:
key = opt.keys()[0]
key = opt.iterkeys().next()
value = opt[key]

normal_key = key.strip().upper()
@ -63,7 +63,7 @@ def _execute_pillar(pillar_name, run_type):
# Check if command is a dict to get the arguments;
# if not, set the arguments to an empty string
if isinstance(command, dict):
plugin = command.keys()[0]
plugin = command.iterkeys().next()
args = command[plugin]
else:
plugin = command
@ -165,7 +165,7 @@ def retcode_pillar(pillar_name):
# Check if command is a dict to get the arguments;
# if not, set the arguments to an empty string
if isinstance(command, dict):
plugin = command.keys()[0]
plugin = command.iterkeys().next()
args = command[plugin]
else:
plugin = command
@ -262,6 +262,137 @@ def _netstat_bsd():
|
||||
return ret
|
||||
|
||||
|
||||
def _netstat_route_linux():
|
||||
'''
|
||||
Return netstat routing information for Linux distros
|
||||
'''
|
||||
ret = []
|
||||
cmd = 'netstat -A inet -rn | tail -n+3'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
ret.append({
|
||||
'addr_family': 'inet',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': comps[2],
|
||||
'flags': comps[3],
|
||||
'interface': comps[7]})
|
||||
cmd = 'netstat -A inet6 -rn | tail -n+3'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
if len(comps) == 6:
|
||||
ret.append({
|
||||
'addr_family': 'inet6',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': '',
|
||||
'flags': comps[3],
|
||||
'interface': comps[5]})
|
||||
elif len(comps) == 7:
|
||||
ret.append({
|
||||
'addr_family': 'inet6',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': '',
|
||||
'flags': comps[3],
|
||||
'interface': comps[6]})
|
||||
else:
|
||||
continue
|
||||
return ret
|
||||
|
||||
|
||||
def _netstat_route_freebsd():
|
||||
'''
|
||||
Return netstat routing information for FreeBSD and OS X
|
||||
'''
|
||||
ret = []
|
||||
cmd = 'netstat -f inet -rn | tail -n+5'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
ret.append({
|
||||
'addr_family': 'inet',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': comps[2],
|
||||
'flags': comps[3],
|
||||
'interface': comps[5]})
|
||||
cmd = 'netstat -f inet6 -rn | tail -n+5'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
ret.append({
|
||||
'addr_family': 'inet6',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': '',
|
||||
'flags': comps[2],
|
||||
'interface': comps[3]})
|
||||
return ret
|
||||
|
||||
|
||||
def _netstat_route_netbsd():
|
||||
'''
|
||||
Return netstat routing information for NetBSD
|
||||
'''
|
||||
ret = []
|
||||
cmd = 'netstat -f inet -rn | tail -n+5'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
ret.append({
|
||||
'addr_family': 'inet',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': '',
|
||||
'flags': comps[3],
|
||||
'interface': comps[6]})
|
||||
cmd = 'netstat -f inet6 -rn | tail -n+5'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
ret.append({
|
||||
'addr_family': 'inet6',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': '',
|
||||
'flags': comps[3],
|
||||
'interface': comps[6]})
|
||||
return ret
|
||||
|
||||
|
||||
def _netstat_route_openbsd():
|
||||
'''
|
||||
Return netstat routing information for OpenBSD
|
||||
'''
|
||||
ret = []
|
||||
cmd = 'netstat -f inet -rn | tail -n+5'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
ret.append({
|
||||
'addr_family': 'inet',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': '',
|
||||
'flags': comps[2],
|
||||
'interface': comps[7]})
|
||||
cmd = 'netstat -f inet6 -rn | tail -n+5'
|
||||
out = __salt__['cmd.run'](cmd)
|
||||
for line in out.splitlines():
|
||||
comps = line.split()
|
||||
ret.append({
|
||||
'addr_family': 'inet6',
|
||||
'destination': comps[0],
|
||||
'gateway': comps[1],
|
||||
'netmask': '',
|
||||
'flags': comps[2],
|
||||
'interface': comps[7]})
|
||||
return ret
|
||||
|
||||
|
||||
def netstat():
|
||||
'''
|
||||
Return information on open ports and states
|
||||
@ -854,3 +985,71 @@ def mod_bufsize(iface, *args, **kwargs):
        return _mod_bufsize_linux(iface, *args, **kwargs)

    return False


def routes(family=None):
    '''
    Return currently configured routes from routing table

    CLI Example::

        salt '*' network.routes
    '''
    if family != 'inet' and family != 'inet6' and family is not None:
        raise CommandExecutionError('Invalid address family {0}'.format(family))

    if __grains__['kernel'] == 'Linux':
        routes = _netstat_route_linux()
    elif __grains__['os'] in ['FreeBSD', 'MacOS', 'Darwin']:
        routes = _netstat_route_freebsd()
    elif __grains__['os'] in ['NetBSD']:
        routes = _netstat_route_netbsd()
    elif __grains__['os'] in ['OpenBSD']:
        routes = _netstat_route_openbsd()
    else:
        raise CommandExecutionError('Not yet supported on this platform')

    if not family:
        return routes
    else:
        ret = []
        for route in routes:
            if route['addr_family'] == family:
                ret.append(route)
        return ret


def default_route(family=None):
    '''
    Return default route(s) from routing table

    CLI Example::

        salt '*' network.default_route
    '''

    if family != 'inet' and family != 'inet6' and family is not None:
        raise CommandExecutionError('Invalid address family {0}'.format(family))

    _routes = routes()
    default_route = {}
    if __grains__['kernel'] == 'Linux':
        default_route['inet'] = ['0.0.0.0', 'default']
        default_route['inet6'] = ['::/0', 'default']
    elif __grains__['os'] in ['FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS', 'Darwin']:
        default_route['inet'] = ['default']
        default_route['inet6'] = ['default']
    else:
        raise CommandExecutionError('Not yet supported on this platform')

    ret = []
    for route in _routes:
        if family:
            if route['destination'] in default_route[family]:
                ret.append(route)
        else:
            if route['destination'] in default_route['inet'] or \
                    route['destination'] in default_route['inet6']:
                ret.append(route)

    return ret
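Each entry returned by ``routes()`` is a dict with a fixed key set, so family filtering and default-route matching reduce to plain lookups. A sketch with illustrative values:

.. code-block:: python

    # Illustrative entry as produced by _netstat_route_linux():
    route = {'addr_family': 'inet',
             'destination': '0.0.0.0',
             'gateway': '192.168.1.1',
             'netmask': '0.0.0.0',
             'flags': 'UG',
             'interface': 'eth0'}

    # default_route('inet') keeps entries whose destination matches the
    # platform marker: '0.0.0.0' or 'default' on Linux, 'default' on BSD.
    is_default = route['destination'] in ['0.0.0.0', 'default']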
|
@ -590,5 +590,5 @@ def owner(*paths):
|
||||
for path in paths:
|
||||
ret[path] = __salt__['cmd.run_stdout'](cmd.format(path))
|
||||
if len(ret) == 1:
|
||||
return ret.values()[0]
|
||||
return ret.itervalues().next()
|
||||
return ret
|
||||
|
@ -163,7 +163,7 @@ def version(*names, **kwargs):
|
||||
for name in names:
|
||||
if '*' in name:
|
||||
pkg_glob = True
|
||||
for match in fnmatch.filter(pkgs.keys(), name):
|
||||
for match in fnmatch.filter(pkgs, name):
|
||||
ret[match] = pkgs.get(match, [])
|
||||
else:
|
||||
ret[name] = pkgs.get(name, [])
|
||||
@ -173,8 +173,8 @@ def version(*names, **kwargs):
|
||||
# return dict
|
||||
if len(ret) == 1 and not pkg_glob:
|
||||
try:
|
||||
return ret.values()[0]
|
||||
except IndexError:
|
||||
return ret.itervalues().next()
|
||||
except StopIteration:
|
||||
return ''
|
||||
return ret
|
||||
|
||||
@ -210,7 +210,7 @@ def sort_pkglist(pkgs):
|
||||
# It doesn't matter that ['4.9','4.10'] would be sorted to ['4.10','4.9'],
|
||||
# so long as the sorting is consistent.
|
||||
try:
|
||||
for key in pkgs.keys():
|
||||
for key in pkgs:
|
||||
# Passing the pkglist to set() also removes duplicate version
|
||||
# numbers (if present).
|
||||
pkgs[key] = sorted(set(pkgs[key]))
|
||||
@ -230,7 +230,7 @@ def stringify(pkgs):
|
||||
salt '*' pkg_resource.stringify 'vim: 7.127'
|
||||
'''
|
||||
try:
|
||||
for key in pkgs.keys():
|
||||
for key in pkgs:
|
||||
pkgs[key] = ','.join(pkgs[key])
|
||||
except AttributeError as e:
|
||||
log.exception(e)
|
||||
|
@ -508,7 +508,7 @@ def file_list(package):
|
||||
'''
|
||||
ret = file_dict(package)
|
||||
files = []
|
||||
for pkg_files in ret['files'].values():
|
||||
for pkg_files in ret['files'].itervalues():
|
||||
files.extend(pkg_files)
|
||||
ret['files'] = files
|
||||
return ret
|
||||
|
@ -152,7 +152,7 @@ def add_user(name, password=None, runas=None):
|
||||
# Now, Clear the random password from the account, if necessary
|
||||
res2 = clear_password(name, runas)
|
||||
|
||||
if 'Error' in res2.keys():
|
||||
if 'Error' in res2:
|
||||
# Clearing the password failed. We should try to cleanup
|
||||
# and rerun and error.
|
||||
delete_user(name, runas)
|
||||
|
@ -100,7 +100,7 @@ def verify(*package, **kwargs):
|
||||
fname = line[13:]
|
||||
if line[11:12] in ftypes:
|
||||
fdict['type'] = ftypes[line[11:12]]
|
||||
if 'type' not in fdict.keys() or fdict['type'] not in ignore_types:
|
||||
if 'type' not in fdict or fdict['type'] not in ignore_types:
|
||||
if line[0:1] == 'S':
|
||||
fdict['mismatch'].append('size')
|
||||
if line[1:2] == 'M':
|
||||
@ -175,7 +175,7 @@ def file_dict(*packages):
|
||||
continue
|
||||
comps = line.split()
|
||||
pkgs[comps[0]] = {'version': comps[1]}
|
||||
for pkg in pkgs.keys():
|
||||
for pkg in pkgs:
|
||||
files = []
|
||||
cmd = 'rpm -ql {0}'.format(pkg)
|
||||
out = __salt__['cmd.run'](cmd, output_loglevel='trace')
|
||||
|
@ -62,7 +62,7 @@ def list_(show_all=False, return_yaml=True):
|
||||
if 'schedule' in __pillar__:
|
||||
schedule.update(__pillar__['schedule'])
|
||||
|
||||
for job in schedule.keys():
|
||||
for job in schedule:
|
||||
if job == 'enabled':
|
||||
continue
|
||||
|
||||
@ -72,7 +72,7 @@ def list_(show_all=False, return_yaml=True):
|
||||
del schedule[job]
|
||||
continue
|
||||
|
||||
for item in schedule[job].keys():
|
||||
for item in schedule[job]:
|
||||
if item not in SCHEDULE_CONF:
|
||||
del schedule[job][item]
|
||||
continue
|
||||
@ -81,7 +81,7 @@ def list_(show_all=False, return_yaml=True):
|
||||
if schedule[job][item] == 'false':
|
||||
schedule[job][item] = False
|
||||
|
||||
if '_seconds' in schedule[job].keys():
|
||||
if '_seconds' in schedule[job]:
|
||||
schedule[job]['seconds'] = schedule[job]['_seconds']
|
||||
del schedule[job]['_seconds']
|
||||
|
||||
@ -114,7 +114,7 @@ def purge(**kwargs):
|
||||
if 'schedule' in __pillar__:
|
||||
schedule.update(__pillar__['schedule'])
|
||||
|
||||
for name in schedule.keys():
|
||||
for name in schedule:
|
||||
if name == 'enabled':
|
||||
continue
|
||||
if name.startswith('__'):
|
||||
|
42
salt/modules/sdb.py
Normal file
@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
'''
Module for Manipulating Data via the Salt DB API
================================================
'''

# Import salt libs
import salt.utils.sdb


__func_alias__ = {
    'set_': 'set',
}


def get(uri):
    '''
    Get a value from a db, using a uri in the form of ``sdb://<profile>/<key>``.
    If the uri provided does not start with ``sdb://``, then it will be
    returned as-is.

    CLI Example:

    .. code-block:: bash

        salt '*' sdb.get sdb://mymemcached/foo
    '''
    return salt.utils.sdb.sdb_get(uri, __opts__)


def set_(uri, value):
    '''
    Set a value in a db, using a uri in the form of ``sdb://<profile>/<key>``.
    If the uri provided does not start with ``sdb://`` or the value is not
    successfully set, return ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' sdb.set sdb://mymemcached/foo bar
    '''
    return salt.utils.sdb.sdb_set(uri, value, __opts__)
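The ``sdb://`` URIs above only resolve if a matching profile exists in the minion (or master) configuration. A hedged sketch of that mapping; the profile name and driver settings are assumptions for illustration:

.. code-block:: python

    # Hypothetical profile data as it would appear in the loaded config.
    opts = {
        'mymemcached': {
            'driver': 'memcached',
            'host': 'localhost',
            'port': 11211,
        },
    }

    # salt.utils.sdb.sdb_get('sdb://mymemcached/foo', opts) would look up
    # the 'mymemcached' profile, dispatch to its driver, and fetch key
    # 'foo'; a URI that does not start with 'sdb://' is returned as-is.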
@ -70,14 +70,14 @@ def getenforce():

        salt '*' selinux.getenforce
    '''
    enforce = os.path.join(selinux_fs_path(), 'enforce')
    try:
        enforce = os.path.join(selinux_fs_path(), 'enforce')
        with salt.utils.fopen(enforce, 'r') as _fp:
            if _fp.readline().strip() == '0':
                return 'Permissive'
            else:
                return 'Enforcing'
    except (IOError, OSError) as exc:
    except (IOError, OSError, AttributeError) as exc:
        msg = 'Could not read SELinux enforce file: {0}'
        raise CommandExecutionError(msg.format(str(exc)))
@ -36,6 +36,9 @@ __outputter__ = {
|
||||
'template_str': 'highstate',
|
||||
}
|
||||
|
||||
__func_alias__ = {
|
||||
'apply_': 'apply'
|
||||
}
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -245,6 +248,153 @@ def template_str(tem, queue=False, **kwargs):
|
||||
return ret
|
||||
|
||||
|
||||
def apply_(mods=None,
|
||||
**kwargs):
|
||||
'''
|
||||
Apply states! This function will call highstate or state.sls based on the
arguments passed in. ``state.apply`` is intended to be the main gateway for
all state executions.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' state.apply
|
||||
salt '*' state.apply test
|
||||
salt '*' state.apply test,pkgs
|
||||
'''
|
||||
if mods:
|
||||
return sls(mods, **kwargs)
|
||||
return highstate(**kwargs)
|
||||
|
||||
|
||||
def request(mods=None,
|
||||
**kwargs):
|
||||
'''
|
||||
Request that the local admin execute a state run via
``salt-call state.run_request``. All arguments match ``state.apply``.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' state.request
|
||||
salt '*' state.request test
|
||||
salt '*' state.request test,pkgs
|
||||
'''
|
||||
kwargs['test'] = True
|
||||
ret = apply_(mods, **kwargs)
|
||||
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
|
||||
serial = salt.payload.Serial(__opts__)
|
||||
req = check_request()
|
||||
req.update({kwargs.get('name', 'default'): {
|
||||
'test_run': ret,
|
||||
'mods': mods,
|
||||
'kwargs': kwargs
|
||||
}
|
||||
})
|
||||
cumask = os.umask(077)
|
||||
try:
|
||||
if salt.utils.is_windows():
|
||||
# Make sure cache file isn't read-only
|
||||
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
|
||||
with salt.utils.fopen(notify_path, 'w+b') as fp_:
|
||||
serial.dump(req, fp_)
|
||||
except (IOError, OSError):
|
||||
msg = 'Unable to write state request file {0}. Check permission.'
|
||||
log.error(msg.format(notify_path))
|
||||
os.umask(cumask)
|
||||
return ret
|
||||
|
||||
|
||||
def check_request(name=None):
|
||||
'''
|
||||
Return the state request information, if any
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' state.check_request
|
||||
'''
|
||||
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
|
||||
serial = salt.payload.Serial(__opts__)
|
||||
if os.path.isfile(notify_path):
|
||||
with open(notify_path, 'rb') as fp_:
|
||||
req = serial.load(fp_)
|
||||
if name:
|
||||
return req[name]
|
||||
return req
|
||||
return {}
|
||||
|
||||
|
||||
def clear_request(name=None):
|
||||
'''
|
||||
Clear out the state execution request without executing it
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' state.clear_request
|
||||
'''
|
||||
notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
|
||||
serial = salt.payload.Serial(__opts__)
|
||||
if not os.path.isfile(notify_path):
|
||||
return True
|
||||
if not name:
|
||||
try:
|
||||
os.remove(notify_path)
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
else:
|
||||
req = check_request()
|
||||
if name in req:
|
||||
req.pop(name)
|
||||
else:
|
||||
return False
|
||||
cumask = os.umask(077)
|
||||
try:
|
||||
if salt.utils.is_windows():
|
||||
# Make sure cache file isn't read-only
|
||||
__salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
|
||||
with salt.utils.fopen(notify_path, 'w+b') as fp_:
|
||||
serial.dump(req, fp_)
|
||||
except (IOError, OSError):
|
||||
msg = 'Unable to write state request file {0}. Check permission.'
|
||||
log.error(msg.format(notify_path))
|
||||
os.umask(cumask)
|
||||
return True
|
||||
|
||||
|
||||
def run_request(name='default', **kwargs):
|
||||
'''
|
||||
Execute the pending state request
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' state.run_request
|
||||
'''
|
||||
req = check_request()
|
||||
if name not in req:
|
||||
return {}
|
||||
n_req = req[name]
|
||||
if 'mods' not in n_req or 'kwargs' not in n_req:
|
||||
return {}
|
||||
req['kwargs'].update(kwargs)
|
||||
if req:
|
||||
ret = apply_(n_req['mods'], **n_req['kwargs'])
|
||||
try:
|
||||
os.remove(os.path.join(__opts__['cachedir'], 'req_state.p'))
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
return ret
|
||||
return {}
|
||||
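Taken together, ``request()``, ``check_request()``, ``run_request()`` and ``clear_request()`` implement a store-then-approve workflow around a single serialized file. A sketch of the structure ``request()`` writes to ``<cachedir>/req_state.p``, keyed by request name:

.. code-block:: python

    # Illustrative contents; 'test_run' holds the dry-run (test=True) results.
    req = {
        'default': {
            'test_run': {'succeeded': True},   # placeholder dry-run output
            'mods': ['test', 'pkgs'],          # sls targets, or None for highstate
            'kwargs': {'test': True},
        },
    }

    # check_request() deserializes this dict (or one named entry), and
    # run_request(name) re-applies the stored mods/kwargs, removing the
    # file after the run is kicked off.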
|
||||
|
||||
def highstate(test=None,
|
||||
queue=False,
|
||||
**kwargs):
|
||||
|
@ -235,7 +235,7 @@ def returner_doc(*args):
|
||||
returners_ = salt.loader.returners(__opts__, [])
|
||||
docs = {}
|
||||
if not args:
|
||||
for fun in returners_.keys():
|
||||
for fun in returners_:
|
||||
docs[fun] = returners_[fun].__doc__
|
||||
return _strip_rst(docs)
|
||||
|
||||
@ -251,8 +251,9 @@ def returner_doc(*args):
|
||||
else:
|
||||
target_mod = ''
|
||||
if _use_fnmatch:
|
||||
for fun in fnmatch.filter(returners_.keys(), target_mod):
|
||||
docs[fun] = returners_[fun].__doc__
|
||||
for fun in returners_:
|
||||
if fun == module or fun.startswith(target_mod):
|
||||
docs[fun] = returners_[fun].__doc__
|
||||
else:
|
||||
for fun in returners_.keys():
|
||||
if fun == module or fun.startswith(target_mod):
|
||||
@ -747,7 +748,7 @@ def list_returner_functions(*args, **kwargs):
|
||||
returners_ = salt.loader.returners(__opts__, [])
|
||||
if not args:
|
||||
# We're being asked for all functions
|
||||
return sorted(returners_.keys())
|
||||
return sorted(returners_)
|
||||
|
||||
names = set()
|
||||
for module in args:
|
||||
@ -760,8 +761,9 @@ def list_returner_functions(*args, **kwargs):
|
||||
# sysctl
|
||||
module = module + '.' if not module.endswith('.') else module
|
||||
if _use_fnmatch:
|
||||
for func in fnmatch.filter(returners_.keys(), target_mod):
|
||||
names.add(func)
|
||||
for func in returners_:
|
||||
if func.startswith(module):
|
||||
names.add(func)
|
||||
else:
|
||||
for func in returners_.keys():
|
||||
if func.startswith(module):
|
||||
|
@ -447,7 +447,3 @@ def tty(*args, **kwargs): # pylint: disable=W0613
|
||||
salt '*' test.tty pts3 'This is a test'
|
||||
'''
|
||||
return 'ERROR: This function has been moved to cmd.tty'
|
||||
|
||||
|
||||
def assertion(assertion_):
|
||||
assert assertion_
|
||||
|
@ -83,7 +83,7 @@ def _find_utmp():
|
||||
result[os.stat(utmp).st_mtime] = utmp
|
||||
except Exception:
|
||||
pass
|
||||
return result[sorted(result.keys()).pop()]
|
||||
return result[sorted(result).pop()]
|
||||
|
||||
|
||||
def _default_runlevel():
|
||||
|
@ -461,7 +461,7 @@ def _nic_profile(profile_name, hypervisor, **kwargs):
|
||||
elif isinstance(config_data, list):
|
||||
for interface in config_data:
|
||||
if isinstance(interface, dict):
|
||||
if len(interface.keys()) == 1:
|
||||
if len(interface) == 1:
|
||||
append_dict_profile_to_interface_list(interface)
|
||||
else:
|
||||
interfaces.append(interface)
|
||||
@ -551,7 +551,7 @@ def init(name,
|
||||
|
||||
# When using a disk profile extract the sole dict key of the first
|
||||
# array element as the filename for disk
|
||||
disk_name = diskp[0].keys()[0]
|
||||
disk_name = diskp[0].iterkeys().next()
|
||||
disk_type = diskp[0][disk_name]['format']
|
||||
disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
|
||||
|
||||
@ -797,7 +797,7 @@ def get_nics(vm_):
|
||||
# driver, source, and match can all have optional attributes
|
||||
if re.match('(driver|source|address)', v_node.tagName):
|
||||
temp = {}
|
||||
for key in v_node.attributes.keys():
|
||||
for key in v_node.attributes:
|
||||
temp[key] = v_node.getAttribute(key)
|
||||
nic[str(v_node.tagName)] = temp
|
||||
# virtualport needs to be handled separately, to pick up the
|
||||
@ -805,7 +805,7 @@ def get_nics(vm_):
|
||||
if v_node.tagName == 'virtualport':
|
||||
temp = {}
|
||||
temp['type'] = v_node.getAttribute('type')
|
||||
for key in v_node.attributes.keys():
|
||||
for key in v_node.attributes:
|
||||
temp[key] = v_node.getAttribute(key)
|
||||
nic['virtualport'] = temp
|
||||
if 'mac' not in nic:
|
||||
@ -855,7 +855,7 @@ def get_graphics(vm_):
|
||||
for node in doc.getElementsByTagName('domain'):
|
||||
g_nodes = node.getElementsByTagName('graphics')
|
||||
for g_node in g_nodes:
|
||||
for key in g_node.attributes.keys():
|
||||
for key in g_node.attributes:
|
||||
out[key] = g_node.getAttribute(key)
|
||||
return out
|
||||
|
||||
@ -1685,7 +1685,7 @@ def vm_netstats(vm_=None):
|
||||
'tx_errs': 0,
|
||||
'tx_drop': 0
|
||||
}
|
||||
for attrs in nics.values():
|
||||
for attrs in nics.itervalues():
|
||||
if 'target' in attrs:
|
||||
dev = attrs['target']
|
||||
stats = dom.interfaceStats(dev)
|
||||
|
@ -6,6 +6,15 @@ Manage groups on Windows
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
|
||||
|
||||
try:
|
||||
import win32com.client
|
||||
import pythoncom
|
||||
import pywintypes
|
||||
HAS_DEPENDENCIES = True
|
||||
except ImportError:
|
||||
HAS_DEPENDENCIES = False
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'group'
|
||||
|
||||
@ -14,7 +23,10 @@ def __virtual__():
|
||||
'''
|
||||
Set the group module if the kernel is Windows
|
||||
'''
|
||||
return __virtualname__ if salt.utils.is_windows() else False
|
||||
if salt.utils.is_windows() and HAS_DEPENDENCIES:
|
||||
return __virtualname__
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def add(name, gid=None, system=False):
|
||||
@ -27,11 +39,35 @@ def add(name, gid=None, system=False):
|
||||
|
||||
salt '*' group.add foo
|
||||
'''
|
||||
cmd = 'net localgroup {0} /add'.format(name)
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': [],
|
||||
'comment': ''}
|
||||
|
||||
ret = __salt__['cmd.run_all'](cmd)
|
||||
if not info(name):
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
try:
|
||||
compObj = nt.GetObject('', 'WinNT://.,computer')
|
||||
newGroup = compObj.Create('group', name)
|
||||
newGroup.SetInfo()
|
||||
ret['changes'].append((
|
||||
'Successfully created group {0}'
|
||||
).format(name))
|
||||
except pywintypes.com_error as com_err:
|
||||
ret['result'] = False
|
||||
if len(com_err.excepinfo) > 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['comment'] = (
|
||||
'Failed to create group {0}. {1}'
|
||||
).format(name, friendly_error)
|
||||
else:
|
||||
ret['result'] = None
|
||||
ret['comment'] = (
|
||||
'The group {0} already exists.'
|
||||
).format(name)
|
||||
|
||||
return not ret['retcode']
|
||||
return ret
|
||||
|
||||
|
||||
def delete(name):
|
||||
@ -44,9 +80,32 @@ def delete(name):
|
||||
|
||||
salt '*' group.delete foo
|
||||
'''
|
||||
ret = __salt__['cmd.run_all']('net localgroup {0} /delete'.format(name))
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': [],
|
||||
'comment': ''}
|
||||
|
||||
return not ret['retcode']
|
||||
if info(name):
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
try:
|
||||
compObj = nt.GetObject('', 'WinNT://.,computer')
|
||||
compObj.Delete('group', name)
|
||||
ret['changes'].append(('Successfully removed group {0}').format(name))
|
||||
except pywintypes.com_error as com_err:
|
||||
ret['result'] = False
|
||||
if len(com_err.excepinfo) > 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['comment'] = (
|
||||
'Failed to remove group {0}. {1}'
|
||||
).format(name, friendly_error)
|
||||
else:
|
||||
ret['result'] = None
|
||||
ret['comment'] = (
|
||||
'The group {0} does not exist.'
|
||||
).format(name)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def info(name):
|
||||
@ -59,20 +118,20 @@ def info(name):
|
||||
|
||||
salt '*' group.info foo
|
||||
'''
|
||||
lines = __salt__['cmd.run']('net localgroup {0}'.format(name)).splitlines()
|
||||
memberline = False
|
||||
gr_mem = []
|
||||
gr_name = ''
|
||||
for line in lines:
|
||||
if 'Alias name' in line:
|
||||
comps = line.split(' ', 1)
|
||||
gr_name = comps[1].strip()
|
||||
if 'successfully' in line:
|
||||
memberline = False
|
||||
if memberline:
|
||||
gr_mem.append(line.strip())
|
||||
if '---' in line:
|
||||
memberline = True
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
|
||||
try:
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
gr_name = groupObj.Name
|
||||
gr_mem = []
|
||||
for member in groupObj.members():
|
||||
gr_mem.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace'))
|
||||
except pywintypes.com_error:
|
||||
return False
|
||||
|
||||
if not gr_name:
|
||||
return False
|
||||
|
||||
@ -96,33 +155,216 @@ def getent(refresh=False):
|
||||
return __context__['group.getent']
|
||||
|
||||
ret = []
|
||||
ret2 = []
|
||||
lines = __salt__['cmd.run']('net localgroup').splitlines()
|
||||
groupline = False
|
||||
for line in lines:
|
||||
if 'successfully' in line:
|
||||
groupline = False
|
||||
if groupline:
|
||||
ret.append(line.strip('*').strip())
|
||||
if '---' in line:
|
||||
groupline = True
|
||||
for item in ret:
|
||||
members = []
|
||||
gid = __salt__['file.group_to_gid'](item)
|
||||
memberlines = __salt__['cmd.run']('net localgroup "{0}"'.format(item)).splitlines()
|
||||
memberline = False
|
||||
for line in memberlines:
|
||||
if 'successfully' in line:
|
||||
memberline = False
|
||||
if memberline:
|
||||
members.append(line.strip('*').strip())
|
||||
if '---' in line:
|
||||
memberline = True
|
||||
group = {'gid': gid,
|
||||
'members': members,
|
||||
'name': item,
|
||||
'passwd': 'x'}
|
||||
ret2.append(group)
|
||||
|
||||
__context__['group.getent'] = ret2
|
||||
return ret2
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
|
||||
results = nt.GetObject('', 'WinNT://.')
|
||||
results.Filter = ['group']
|
||||
for result in results:
|
||||
member_list = []
|
||||
for member in result.members():
|
||||
member_list.append(
|
||||
member.AdsPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace'))
|
||||
group = {'gid': __salt__['file.group_to_gid'](result.name),
|
||||
'members': member_list,
|
||||
'name': result.name,
|
||||
'passwd': 'x'}
|
||||
ret.append(group)
|
||||
__context__['group.getent'] = ret
|
||||
return ret
|
||||
|
||||
|
||||
def adduser(name, username):
|
||||
'''
|
||||
add a user to a group
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' group.adduser foo username
|
||||
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {'Users Added': []},
|
||||
'comment': ''}
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
existingMembers = []
|
||||
for member in groupObj.members():
|
||||
existingMembers.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
|
||||
try:
|
||||
if __fixlocaluser(username.lower()) not in existingMembers:
|
||||
if not __opts__['test']:
|
||||
groupObj.Add('WinNT://' + username.replace('\\', '/'))
|
||||
|
||||
ret['changes']['Users Added'].append(username)
|
||||
else:
|
||||
ret['comment'] = (
|
||||
'User {0} is already a member of {1}'
|
||||
).format(username, name)
|
||||
ret['result'] = None
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) > 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['comment'] = (
|
||||
'Failed to add {0} to group {1}. {2}'
|
||||
).format(username, name, friendly_error)
|
||||
ret['result'] = False
|
||||
return ret
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def deluser(name, username):
|
||||
'''
|
||||
remove a user from a group
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' group.deluser foo username
|
||||
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {'Users Removed': []},
|
||||
'comment': ''}
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
existingMembers = []
|
||||
for member in groupObj.members():
|
||||
existingMembers.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
|
||||
try:
|
||||
if __fixlocaluser(username.lower()) in existingMembers:
|
||||
if not __opts__['test']:
|
||||
groupObj.Remove('WinNT://' + username.replace('\\', '/'))
|
||||
|
||||
ret['changes']['Users Removed'].append(username)
|
||||
else:
|
||||
ret['comment'] = (
|
||||
'User {0} is not a member of {1}'
|
||||
).format(username, name)
|
||||
ret['result'] = None
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) > 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['comment'] = (
|
||||
'Failed to remove {0} from group {1}. {2}'
|
||||
).format(username, name, friendly_error)
|
||||
ret['result'] = False
|
||||
return ret
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def members(name, members_list):
|
||||
'''
|
||||
ensure a group contains only the members in the given list
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' group.members foo 'user1,user2,user3'
|
||||
|
||||
'''
|
||||
|
||||
ret = {'name': name,
|
||||
'result': True,
|
||||
'changes': {'Users Added': [], 'Users Removed': []},
|
||||
'comment': []}
|
||||
|
||||
members_list = [__fixlocaluser(thisMember) for thisMember in members_list.lower().split(",")]
|
||||
if not isinstance(members_list, list):
|
||||
ret['result'] = False
|
||||
ret['comment'].append('Members is not a list object')
|
||||
return ret
|
||||
|
||||
pythoncom.CoInitialize()
|
||||
nt = win32com.client.Dispatch('AdsNameSpaces')
|
||||
try:
|
||||
groupObj = nt.GetObject('', 'WinNT://./' + name + ',group')
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) > 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['result'] = False
|
||||
ret['comment'].append((
|
||||
'Failure accessing group {0}. {1}'
|
||||
).format(name, friendly_error))
|
||||
return ret
|
||||
existingMembers = []
|
||||
for member in groupObj.members():
|
||||
existingMembers.append(
|
||||
member.ADSPath.replace('WinNT://', '').replace(
|
||||
'/', '\\').encode('ascii', 'backslashreplace').lower())
|
||||
|
||||
existingMembers.sort()
|
||||
members_list.sort()
|
||||
|
||||
if existingMembers == members_list:
|
||||
ret['result'] = None
|
||||
ret['comment'].append(('{0} membership is correct').format(name))
|
||||
return ret
|
||||
|
||||
# add users
|
||||
for member in members_list:
|
||||
if member not in existingMembers:
|
||||
try:
|
||||
if not __opts__['test']:
|
||||
groupObj.Add('WinNT://' + member.replace('\\', '/'))
|
||||
ret['changes']['Users Added'].append(member)
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) > 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['result'] = False
|
||||
ret['comment'].append((
|
||||
'Failed to add {0} to {1}. {2}'
|
||||
).format(member, name, friendly_error))
|
||||
#return ret
|
||||
|
||||
# remove users not in members_list
|
||||
for member in existingMembers:
|
||||
if member not in members_list:
|
||||
try:
|
||||
if not __opts__['test']:
|
||||
groupObj.Remove('WinNT://' + member.replace('\\', '/'))
|
||||
ret['changes']['Users Removed'].append(member)
|
||||
except pywintypes.com_error as com_err:
|
||||
if len(com_err.excepinfo) > 2:
|
||||
friendly_error = com_err.excepinfo[2].rstrip('\r\n')
|
||||
ret['result'] = False
|
||||
ret['comment'].append((
|
||||
'Failed to remove {0} from {1}. {2}'
|
||||
).format(member, name, friendly_error))
|
||||
#return ret
|
||||
|
||||
return ret
|
||||
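``members()`` above is a reconcile loop: names in the desired list but not in the group are added, names in the group but not desired are removed. The same set logic in isolation (group and user names are illustrative):

.. code-block:: python

    desired = set(['minion01\\alice', 'minion01\\bob'])
    existing = set(['minion01\\bob', 'minion01\\carol'])

    to_add = desired - existing       # set(['minion01\\alice'])
    to_remove = existing - desired    # set(['minion01\\carol'])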
|
||||
|
||||
def __fixlocaluser(username):
|
||||
'''
|
||||
prefixes a username w/o a backslash with the computername
|
||||
|
||||
i.e. __fixlocaluser('Administrator') would return 'computername\administrator'
|
||||
'''
|
||||
if '\\' not in username:
|
||||
username = ('{0}\\{1}').format(__salt__['grains.get']('host'), username)
|
||||
|
||||
return username.lower()
|
||||
|
@ -62,7 +62,10 @@ def get_servers():
    cmd = 'w32tm /query /configuration'
    lines = __salt__['cmd.run'](cmd).splitlines()
    for line in lines:
        if 'NtpServer' in line:
            _, ntpsvrs = line.rstrip(' (Local)').split(':', 1)
            return sorted(ntpsvrs.split())
        try:
            if 'NtpServer' in line:
                _, ntpsvrs = line.rstrip(' (Local)').split(':', 1)
                return sorted(ntpsvrs.split())
        except ValueError as e:
            return False
    return False
|
@ -758,6 +758,5 @@ def _reverse_cmp_pkg_versions(pkg1, pkg2):
|
||||
|
||||
def _get_latest_pkg_version(pkginfo):
|
||||
if len(pkginfo) == 1:
|
||||
return pkginfo.keys().pop()
|
||||
pkgkeys = pkginfo.keys()
|
||||
return sorted(pkgkeys, cmp=_reverse_cmp_pkg_versions).pop()
|
||||
return pkginfo.iterkeys().next()
|
||||
return sorted(pkginfo, cmp=_reverse_cmp_pkg_versions).pop()
|
||||
|
@ -6,11 +6,17 @@ Windows Service module.
|
||||
# Import python libs
|
||||
import salt.utils
|
||||
from subprocess import list2cmdline
|
||||
import time
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'service'
|
||||
|
||||
BUFFSIZE = 5000
|
||||
SERVICE_STOP_DELAY_SECONDS = 15
|
||||
SERVICE_STOP_POLL_MAX_ATTEMPTS = 5
|
||||
|
||||
|
||||
def __virtual__():
|
||||
@ -220,8 +226,25 @@ def stop(name):

        salt '*' service.stop <service name>
    '''
    # net stop issues a stop command and waits briefly (~30s), but will give
    # up if the service takes too long to stop with a misleading
    # "service could not be stopped" message and RC 0.

    cmd = list2cmdline(['net', 'stop', name])
    return not __salt__['cmd.retcode'](cmd)
    res = __salt__['cmd.run'](cmd)
    if 'service was stopped' in res:
        return True

    # we requested a stop, but the service is still thinking about it.
    # poll for the real status
    for attempt in range(SERVICE_STOP_POLL_MAX_ATTEMPTS):
        if not status(name):
            return True
        log.debug('Waiting for %s to stop', name)
        time.sleep(SERVICE_STOP_DELAY_SECONDS)

    log.warning('Giving up on waiting for service `%s` to stop', name)
    return False
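The new ``stop()`` falls back to polling the service status when ``net stop`` gives up early. The poll-until pattern in isolation, a sketch with the module's constants inlined as defaults:

.. code-block:: python

    import time

    def wait_until(predicate, attempts=5, delay=15):
        '''Poll predicate() until it is truthy or attempts run out.'''
        for _ in range(attempts):
            if predicate():
                return True
            time.sleep(delay)
        return False

    # stop() applies this shape with predicate = "service is no longer
    # running", i.e. lambda: not status(name).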
|
||||
|
||||
def restart(name):
|
||||
@ -237,8 +260,7 @@ def restart(name):
|
||||
if has_powershell():
|
||||
cmd = 'Restart-Service {0}'.format(name)
|
||||
return not __salt__['cmd.retcode'](cmd, shell='powershell')
|
||||
stop(name)
|
||||
return start(name)
|
||||
return stop(name) and start(name)
|
||||
|
||||
|
||||
def status(name, sig=None):
|
||||
|
@ -269,7 +269,7 @@ class PyWinUpdater(object):
|
||||
'''
|
||||
updates = self.GetInstallationResults()
|
||||
ret = 'The following are the updates and their return codes.\n'
|
||||
for i in updates.keys():
|
||||
for i in updates:
|
||||
ret += '\t{0}\n'.format(updates[i])
|
||||
return ret
|
||||
|
||||
@ -316,8 +316,8 @@ class PyWinUpdater(object):
|
||||
def SetIncludes(self, includes):
|
||||
if includes:
|
||||
for i in includes:
|
||||
value = i[i.keys()[0]]
|
||||
include = i.keys()[0]
|
||||
value = i[i.iterkeys().next()]
|
||||
include = i.iterkeys().next()
|
||||
self.SetInclude(include, value)
|
||||
log.debug('was asked to set {0} to {1}'.format(include, value))
|
||||
|
||||
|
538
salt/modules/xfs.py
Normal file
@ -0,0 +1,538 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# The MIT License (MIT)
|
||||
# Copyright (C) 2014 SUSE LLC
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to
|
||||
# deal in the Software without restriction, including without limitation the
|
||||
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
# sell copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
# IN THE SOFTWARE.
|
||||
|
||||
'''
|
||||
Module for managing XFS file systems.
|
||||
'''
|
||||
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import logging
|
||||
|
||||
import salt.utils
|
||||
from salt.exceptions import CommandExecutionError
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def __virtual__():
|
||||
'''
|
||||
Only work on POSIX-like systems
|
||||
'''
|
||||
return not salt.utils.is_windows() and __grains__.get('kernel') == 'Linux'
|
||||
|
||||
|
||||
def _verify_run(out, cmd=None):
|
||||
'''
|
||||
Log the error and raise an exception if command execution was not successful.
|
||||
'''
|
||||
if out.get("retcode", 0) and out['stderr']:
|
||||
if cmd:
|
||||
log.debug('Command: "{0}"'.format(cmd))
|
||||
|
||||
log.debug('Return code: {0}'.format(out.get('retcode')))
|
||||
log.debug('Error output:\n{0}'.format(out.get('stderr', "N/A")))
|
||||
|
||||
raise CommandExecutionError(out['stderr'])
|
||||
|
||||
|
||||
def _xfs_info_get_kv(serialized):
    '''
    Parse one line of the XFS info output.
    '''
    # No need to know sub-elements here
    if serialized.startswith("="):
        serialized = serialized[1:].strip()

    serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")

    # Keywords have no spaces, values do
    opt = []
    for tkn in serialized.split(" "):
        if not opt or "=" in tkn:
            opt.append(tkn)
        else:
            opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn

    # Preserve ordering
    return [tuple(items.split("=")) for items in opt]


def _parse_xfs_info(data):
    '''
    Parse output from "xfs_info" or "xfs_growfs -n".
    '''
    ret = {}
    spr = re.compile(r'\s+')
    entry = None
    for line in [spr.sub(" ", l).strip().replace(", ", " ") for l in data.split("\n")]:
        if not line:
            continue
        nfo = _xfs_info_get_kv(line)
        if not line.startswith("="):
            entry = nfo.pop(0)
            ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
        ret[entry[0]].update(dict(nfo))

    return ret
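``_xfs_info_get_kv()`` turns one ``xfs_info`` line into ordered key/value tuples, folding any token without ``=`` into the preceding value. A worked example on a hypothetical line (after ``_parse_xfs_info`` has already collapsed ``, `` to a space):

.. code-block:: python

    line = 'meta-data=/dev/sda1 isize=256 agcount=4 agsize=65536 blks'
    # _xfs_info_get_kv(line) yields:
    # [('meta-data', '/dev/sda1'), ('isize', '256'),
    #  ('agcount', '4'), ('agsize', '65536 blks')]
    # 'blks' carries no '=', so it is appended to the 'agsize' value.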
|
||||
|
||||
def info(device):
|
||||
'''
|
||||
Get filesystem geometry information.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.info /dev/sda1
|
||||
'''
|
||||
out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
|
||||
if out.get('stderr'):
|
||||
raise CommandExecutionError(out['stderr'].replace("xfs_info:", "").strip())
|
||||
return _parse_xfs_info(out['stdout'])
|
||||
|
||||
|
||||
def _xfsdump_output(data):
|
||||
'''
|
||||
Parse CLI output of the xfsdump utility.
|
||||
'''
|
||||
out = {}
|
||||
summary = []
|
||||
summary_block = False
|
||||
|
||||
for line in [l.strip() for l in data.split("\n") if l.strip()]:
|
||||
line = re.sub("^xfsdump: ", "", line)
|
||||
if line.startswith("session id:"):
|
||||
out['Session ID'] = line.split(" ")[-1]
|
||||
elif line.startswith("session label:"):
|
||||
out['Session label'] = re.sub("^session label: ", "", line)
|
||||
elif line.startswith("media file size"):
|
||||
out['Media size'] = re.sub(r"^media file size\s+", "", line)
|
||||
elif line.startswith("dump complete:"):
|
||||
out['Dump complete'] = re.sub(r"^dump complete:\s+", "", line)
|
||||
elif line.startswith("Dump Status:"):
|
||||
out['Status'] = re.sub(r"^Dump Status:\s+", "", line)
|
||||
elif line.startswith("Dump Summary:"):
|
||||
summary_block = True
|
||||
continue
|
||||
|
||||
if line.startswith(" ") and summary_block:
|
||||
summary.append(line.strip())
|
||||
elif not line.startswith(" ") and summary_block:
|
||||
summary_block = False
|
||||
|
||||
if summary:
|
||||
out['Summary'] = ' '.join(summary)
|
||||
|
||||
return out
|
||||
|
||||
|
||||
def dump(device, destination, level=0, label=None, noerase=None):
|
||||
'''
|
||||
Dump filesystem device to the media (file, tape etc).
|
||||
|
||||
Required parameters:
|
||||
|
||||
* **device**: XFS device, content of which to be dumped.
|
||||
* **destination**: Specifies a dump destination.
|
||||
|
||||
Valid options are:
|
||||
|
||||
* **label**: Label of the dump. Otherwise an automatically generated label is used.
|
||||
* **level**: Specifies a dump level of 0 to 9.
|
||||
* **noerase**: Do not pre-erase media before dumping.
|
||||
|
||||
Other options are not used in order to let ``xfsdump`` use its default
|
||||
values, as they are most optimal. See the ``xfsdump(8)`` manpage for
|
||||
a more complete description of these options.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.dump /dev/sda1 /destination/on/the/client
salt '*' xfs.dump /dev/sda1 /destination/on/the/client label='Company accountancy'
salt '*' xfs.dump /dev/sda1 /destination/on/the/client noerase=True
|
||||
'''
|
||||
if not salt.utils.which("xfsdump"):
|
||||
raise CommandExecutionError("Utility \"xfsdump\" has to be installed or missing.")
|
||||
|
||||
label = label and label or time.strftime("XFS dump for \"{0}\" of %Y.%m.%d, %H:%M".format(device),
|
||||
time.localtime()).replace("'", '"')
|
||||
cmd = ["xfsdump"]
|
||||
cmd.append("-F") # Force
|
||||
if not noerase:
|
||||
cmd.append("-E") # pre-erase
|
||||
cmd.append("-L '{0}'".format(label)) # Label
|
||||
cmd.append("-l {0}".format(level)) # Dump level
|
||||
cmd.append("-f {0}".format(destination)) # Media destination
|
||||
cmd.append(device) # Device
|
||||
|
||||
cmd = ' '.join(cmd)
|
||||
out = __salt__['cmd.run_all'](cmd)
|
||||
_verify_run(out, cmd=cmd)
|
||||
|
||||
return _xfsdump_output(out['stdout'])
|
||||
|
||||
|
||||
def _xr_to_keyset(line):
|
||||
'''
|
||||
Parse xfsrestore output keyset elements.
|
||||
'''
|
||||
tkns = [elm for elm in line.strip().split(":", 1) if elm]
|
||||
if len(tkns) == 1:
|
||||
return "'{0}': ".format(tkns[0])
|
||||
else:
|
||||
key, val = tkns
|
||||
return "'{0}': '{1}',".format(key.strip(), val.strip())
|
||||
|
||||
|
||||
def _xfs_inventory_output(out):
|
||||
'''
|
||||
Transform xfsrestore inventory data output to a Python dict source and evaluate it.
|
||||
'''
|
||||
data = []
|
||||
out = [line for line in out.split("\n") if line.strip()]
|
||||
|
||||
# No inventory yet
|
||||
if len(out) == 1 and 'restore status' in out[0].lower():
|
||||
return {'restore_status': out[0]}
|
||||
|
||||
ident = 0
|
||||
data.append("{")
|
||||
for line in out[:-1]:
|
||||
if len([elm for elm in line.strip().split(":") if elm]) == 1:
|
||||
n_ident = len(re.sub("[^\t]", "", line))
|
||||
if ident > n_ident:
|
||||
for step in range(ident):
|
||||
data.append("},")
|
||||
ident = n_ident
|
||||
data.append(_xr_to_keyset(line))
|
||||
data.append("{")
|
||||
else:
|
||||
data.append(_xr_to_keyset(line))
|
||||
for step in range(ident + 1):
|
||||
data.append("},")
|
||||
data.append("},")
|
||||
|
||||
# We are evaling into a python dict, a json load
|
||||
# would be safer
|
||||
data = eval('\n'.join(data))[0] # pylint: disable=W0123
|
||||
data['restore_status'] = out[-1]
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def inventory():
|
||||
'''
|
||||
Display XFS dump inventory without restoration.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.inventory
|
||||
'''
|
||||
out = __salt__['cmd.run_all']("xfsrestore -I")
|
||||
_verify_run(out)
|
||||
|
||||
return _xfs_inventory_output(out['stdout'])
|
||||
|
||||
|
||||
def _xfs_prune_output(out, uuid):
|
||||
'''
|
||||
Parse prune output.
|
||||
'''
|
||||
data = {}
|
||||
cnt = []
|
||||
cutpoint = False
|
||||
for line in [l.strip() for l in out.split("\n") if l]:
|
||||
if line.startswith("-"):
|
||||
if cutpoint:
|
||||
break
|
||||
else:
|
||||
cutpoint = True
|
||||
continue
|
||||
|
||||
if cutpoint:
|
||||
cnt.append(line)
|
||||
|
||||
for kset in [e for e in cnt[1:] if ':' in e]:
|
||||
key, val = [t.strip() for t in kset.split(":", 1)]
|
||||
data[key.lower().replace(" ", "_")] = val
|
||||
|
||||
return data.get('uuid') == uuid and data or {}
|
||||
|
||||
|
||||
def prune_dump(sessionid):
|
||||
'''
|
||||
Prunes the dump session identified by the given session id.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.prune_dump b74a3586-e52e-4a4a-8775-c3334fa8ea2c
|
||||
|
||||
'''
|
||||
out = __salt__['cmd.run_all']("xfsinvutil -s {0} -F".format(sessionid))
|
||||
_verify_run(out)
|
||||
|
||||
data = _xfs_prune_output(out['stdout'], sessionid)
|
||||
if data:
|
||||
return data
|
||||
|
||||
raise CommandExecutionError("Session UUID \"{0}\" was not found.".format(sessionid))
|
||||
|
||||
|
||||
def _blkid_output(out):
|
||||
'''
|
||||
Parse blkid output.
|
||||
'''
|
||||
flt = lambda data: [el for el in data if el.strip()]
|
||||
data = {}
|
||||
for dev_meta in flt(out.split("\n\n")):
|
||||
dev = {}
|
||||
for items in flt(dev_meta.strip().split("\n")):
|
||||
key, val = items.split("=", 1)
|
||||
dev[key.lower()] = val
|
||||
if dev.pop("type") == "xfs":
|
||||
dev['label'] = dev.get('label')
|
||||
data[dev.pop("devname")] = dev
|
||||
|
||||
mounts = _get_mounts()
|
||||
for device in mounts.keys():
|
||||
if data.get(device):
|
||||
data[device].update(mounts[device])
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def devices():
|
||||
'''
|
||||
Get known XFS formatted devices on the system.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.devices
|
||||
'''
|
||||
out = __salt__['cmd.run_all']("blkid -o export")
|
||||
_verify_run(out)
|
||||
|
||||
return _blkid_output(out['stdout'])
|
||||
|
||||
|
||||
def _xfs_estimate_output(out):
|
||||
'''
|
||||
Parse xfs_estimate output.
|
||||
'''
|
||||
spc = re.compile(r"\s+")
|
||||
data = {}
|
||||
for line in [l for l in out.split("\n") if l.strip()][1:]:
|
||||
directory, bsize, blocks, megabytes, logsize = spc.sub(" ", line).split(" ")
|
||||
data[directory] = {
|
||||
'block_size': bsize,
|
||||
'blocks': blocks,
|
||||
'megabytes': megabytes,
|
||||
'logsize': logsize,
|
||||
}
|
||||
|
||||
return data
|
||||
|
||||
|
||||
def estimate(path):
|
||||
'''
|
||||
Estimate the space that an XFS filesystem will take.
|
||||
For each directory estimate the space that directory would take
|
||||
if it were copied to an XFS filesystem.
|
||||
Estimation does not cross mount points.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.estimate /path/to/file
|
||||
salt '*' xfs.estimate /path/to/dir/*
|
||||
'''
|
||||
if not os.path.exists(path):
|
||||
raise CommandExecutionError("Path \"{0}\" was not found.".format(path))
|
||||
|
||||
out = __salt__['cmd.run_all']("xfs_estimate -v {0}".format(path))
|
||||
_verify_run(out)
|
||||
|
||||
return _xfs_estimate_output(out["stdout"])
|
||||
|
||||
|
||||
def mkfs(device, label=None, ssize=None, noforce=None,
         bso=None, gmo=None, ino=None, lso=None, rso=None, nmo=None, dso=None):
    '''
    Create a file system on the specified device. By default the device is
    formatted forcibly, wiping any existing data.

    General options:

    * **label**: Specify volume label.
    * **ssize**: Specify the fundamental sector size of the filesystem.
    * **noforce**: Do not force filesystem creation if the disk is already formatted.

    Filesystem geometry options:

    * **bso**: Block size options.
    * **gmo**: Global metadata options.
    * **dso**: Data section options. These options specify the location, size,
      and other parameters of the data section of the filesystem.
    * **ino**: Inode options to specify the inode size of the filesystem, and other inode allocation parameters.
    * **lso**: Log section options.
    * **nmo**: Naming options.
    * **rso**: Realtime section options.

    See the ``mkfs.xfs(8)`` manpage for a more complete description of these options.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.mkfs /dev/sda1
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' noforce=True
        salt '*' xfs.mkfs /dev/sda1 dso='su=32k,sw=6' lso='logdev=/dev/sda2,size=10000b'
    '''

    getopts = lambda args: dict(((args and ("=" in args)
                                  and args or None)) and map(
        lambda kw: kw.split("="), args.split(",")) or [])
    cmd = ["mkfs.xfs"]
    if label:
        cmd.append("-L")
        cmd.append("'{0}'".format(label))

    if ssize:
        cmd.append("-s")
        cmd.append(ssize)

    for switch, opts in [("-b", bso), ("-m", gmo), ("-n", nmo), ("-i", ino),
                         ("-d", dso), ("-l", lso), ("-r", rso)]:
        try:
            if getopts(opts):
                cmd.append(switch)
                cmd.append(opts)
        except Exception:
            raise CommandExecutionError("Wrong parameters \"{0}\" for option \"{1}\"".format(opts, switch))

    if not noforce:
        cmd.append("-f")
    cmd.append(device)

    cmd = ' '.join(cmd)
    out = __salt__['cmd.run_all'](cmd)
    _verify_run(out, cmd=cmd)

    return _parse_xfs_info(out['stdout'])
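The ``getopts`` lambda in ``mkfs()`` condenses comma-separated ``key=value`` option parsing into one expression. An equivalent, more readable sketch with the same behavior for well-formed input:

.. code-block:: python

    def getopts(args):
        # Empty or '='-free input yields an empty dict; a malformed pair
        # raises, which mkfs() converts into a CommandExecutionError.
        if not args or '=' not in args:
            return {}
        return dict(kw.split('=') for kw in args.split(','))

    assert getopts('su=32k,sw=6') == {'su': '32k', 'sw': '6'}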
|
||||
|
||||
def modify(device, label=None, lazy_counting=None, uuid=None):
|
||||
'''
|
||||
Modify parameters of an XFS filesystem.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.modify /dev/sda1 label='My backup' lazy_counting=False
|
||||
salt '*' xfs.modify /dev/sda1 uuid=False
|
||||
salt '*' xfs.modify /dev/sda1 uuid=True
|
||||
'''
|
||||
if not label and lazy_counting is None and uuid is None:
|
||||
raise CommandExecutionError("Nothing specified for modification for \"{0}\" device".format(device))
|
||||
|
||||
cmd = ['xfs_admin']
|
||||
if label:
|
||||
cmd.append("-L")
|
||||
cmd.append("'{0}'".format(label))
|
||||
|
||||
if lazy_counting is False:
|
||||
cmd.append("-c")
|
||||
cmd.append("0")
|
||||
elif lazy_counting:
|
||||
cmd.append("-c")
|
||||
cmd.append("1")
|
||||
|
||||
if uuid is False:
|
||||
cmd.append("-U")
|
||||
cmd.append("nil")
|
||||
elif uuid:
|
||||
cmd.append("-U")
|
||||
cmd.append("generate")
|
||||
cmd.append(device)
|
||||
|
||||
cmd = ' '.join(cmd)
|
||||
_verify_run(__salt__['cmd.run_all'](cmd), cmd=cmd)
|
||||
|
||||
out = __salt__['cmd.run_all']("blkid -o export {0}".format(device))
|
||||
_verify_run(out)
|
||||
|
||||
return _blkid_output(out['stdout'])
|
||||
|
||||
|
||||
def _get_mounts():
|
||||
'''
|
||||
List mounted filesystems.
|
||||
'''
|
||||
mounts = {}
|
||||
for line in open("/proc/mounts").readlines():
|
||||
device, mntpnt, fstype, options, fs_freq, fs_passno = line.strip().split(" ")
|
||||
if fstype != 'xfs':
|
||||
continue
|
||||
mounts[device] = {
|
||||
'mount_point': mntpnt,
|
||||
'options': options.split(","),
|
||||
}
|
||||
|
||||
return mounts
|
||||
|
||||
|
||||
def defragment(device):
|
||||
'''
|
||||
Defragment mounted XFS filesystem.
|
||||
In order to be defragmented, the device should be properly mounted and writable.
|
||||
|
||||
CLI Example:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
salt '*' xfs.defragment /dev/sda1
|
||||
'''
|
||||
if device == '/':
|
||||
raise CommandExecutionError("Root is not a device.")
|
||||
|
||||
if not _get_mounts().get(device):
|
||||
raise CommandExecutionError("Device \"{0}\" is not mounted".format(device))
|
||||
|
||||
out = __salt__['cmd.run_all']("xfs_fsr {0}".format(device))
|
||||
_verify_run(out)
|
||||
|
||||
return {
|
||||
'log': out['stdout']
|
||||
}
|
@ -965,7 +965,7 @@ def install(name=None,
|
||||
exclude=exclude_arg,
|
||||
branch=branch_arg,
|
||||
gpgcheck='--nogpgcheck' if skip_verify else '',
|
||||
pkg=' '.join(to_reinstall.values()),
|
||||
pkg=' '.join(to_reinstall.itervalues()),
|
||||
)
|
||||
__salt__['cmd.run'](cmd, output_loglevel='trace')
|
||||
|
||||
@ -1843,5 +1843,5 @@ def owner(*paths):
|
||||
if 'not owned' in ret[path].lower():
|
||||
ret[path] = ''
|
||||
if len(ret) == 1:
|
||||
return ret.values()[0]
|
||||
return ret.itervalues().next()
|
||||
return ret
|
||||
|
@ -140,7 +140,9 @@ def lock(path,
|
||||
identifier=None,
|
||||
max_concurrency=1,
|
||||
timeout=None,
|
||||
ephemeral_lease=False):
|
||||
ephemeral_lease=False,
|
||||
force=False,  # forcibly get the lock regardless of open slots
|
||||
):
|
||||
'''
|
||||
Get lock (with optional timeout)
|
||||
'''
|
||||
@ -151,6 +153,11 @@ def lock(path,
|
||||
identifier,
|
||||
max_leases=max_concurrency,
|
||||
ephemeral_lease=ephemeral_lease)
|
||||
|
||||
# forcibly get the lock regardless of max_concurrency
|
||||
if force:
|
||||
SEMAPHORE_MAP[path].assured_path = True
|
||||
|
||||
# block waiting for lock acquisition
|
||||
if timeout:
|
||||
logging.info('Acquiring lock {0} with timeout={1}'.format(path, timeout))
|
||||
|
@ -1,6 +1,8 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
'''
|
||||
Package support for openSUSE via the zypper package manager
|
||||
|
||||
:optdepends: - `zypp` Python module. Install with `zypper install python-zypp`
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
@ -8,25 +10,21 @@ import copy
|
||||
import logging
|
||||
import re
|
||||
import os
|
||||
import ConfigParser
|
||||
import urlparse
|
||||
from xml.dom import minidom as dom
|
||||
from contextlib import contextmanager as _contextmanager
|
||||
|
||||
# Import salt libs
|
||||
import salt.utils
|
||||
from salt.utils.decorators import depends as _depends
|
||||
from salt.exceptions import (
|
||||
CommandExecutionError, MinionError, SaltInvocationError)
|
||||
CommandExecutionError, MinionError)
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
HAS_ZYPP = False
|
||||
LOCKS = "/etc/zypp/locks"
|
||||
|
||||
try:
|
||||
import zypp
|
||||
HAS_ZYPP = True
|
||||
except ImportError:
|
||||
pass
|
||||
ZYPP_HOME = "/etc/zypp"
|
||||
LOCKS = "{0}/locks".format(ZYPP_HOME)
|
||||
REPOS = "{0}/repos.d".format(ZYPP_HOME)
|
||||
|
||||
# Define the module's virtual name
|
||||
__virtualname__ = 'pkg'
|
||||
@ -36,8 +34,6 @@ def __virtual__():
|
||||
'''
|
||||
Set the virtual pkg module if the os is openSUSE
|
||||
'''
|
||||
if not HAS_ZYPP:
|
||||
return False
|
||||
if __grains__.get('os_family', '') != 'Suse':
|
||||
return False
|
||||
# Not all versions of Suse use zypper, check that it is available
|
||||
@ -142,7 +138,7 @@ def latest_version(*names, **kwargs):
|
||||
pkginfo[key] = val
|
||||
|
||||
# Ignore if the needed keys weren't found in this iteration
|
||||
if not set(('name', 'version', 'status')) <= set(pkginfo.keys()):
|
||||
if not set(('name', 'version', 'status')) <= set(pkginfo):
|
||||
continue
|
||||
|
||||
status = pkginfo['status'].lower()
|
||||
@ -229,203 +225,35 @@ def list_pkgs(versions_as_list=False, **kwargs):
|
||||
return ret
|
||||
|
||||
|
||||
class _RepoInfo(object):
|
||||
def _get_configured_repos():
|
||||
'''
|
||||
Incapsulate all properties that are dumped in zypp._RepoInfo.dumpOn:
|
||||
http://doc.opensuse.org/projects/libzypp/HEAD/classzypp_1_1RepoInfo.html#a2ba8fdefd586731621435428f0ec6ff1
|
||||
Get all the info about repositories from the configurations.
|
||||
'''
|
||||
repo_types = {}
|
||||
|
||||
if HAS_ZYPP:
|
||||
repo_types = {
|
||||
zypp.RepoType.NONE_e: 'NONE',
|
||||
zypp.RepoType.RPMMD_e: 'rpm-md',
|
||||
zypp.RepoType.YAST2_e: 'yast2',
|
||||
zypp.RepoType.RPMPLAINDIR_e: 'plaindir',
|
||||
}
|
||||
repos_cfg = ConfigParser.ConfigParser()
|
||||
repos_cfg.read([REPOS + "/" + fname for fname in os.listdir(REPOS)])
|
||||
|
||||
    def __init__(self, zypp_repo_info=None):
        self.zypp = zypp_repo_info if zypp_repo_info else zypp.RepoInfo()

    @property
    def options(self):
        class_items = self.__class__.__dict__.iteritems()
        return dict([(k, getattr(self, k)) for k, v in class_items
                     if isinstance(v, property) and k != 'options'
                     and getattr(self, k) not in (None, '')])

    def _check_only_mirrorlist_or_url(self):
        if all(x in self.options for x in ('mirrorlist', 'url')):
            raise ValueError(
                'Only one of \'mirrorlist\' and \'url\' can be specified')

    def _zypp_url(self, url):
        return zypp.Url(url) if url else zypp.Url()

    @options.setter
    def options(self, value):
        for k, v in value.iteritems():
            setattr(self, k, v)

    @property
    def alias(self):
        return self.zypp.alias()

    @alias.setter
    def alias(self, value):
        if value:
            self.zypp.setAlias(value)
        else:
            raise ValueError('Alias cannot be empty')

    @property
    def autorefresh(self):
        return self.zypp.autorefresh()

    @autorefresh.setter
    def autorefresh(self, value):
        self.zypp.setAutorefresh(value)

    @property
    def enabled(self):
        return self.zypp.enabled()

    @enabled.setter
    def enabled(self, value):
        self.zypp.setEnabled(value)

    @property
    def gpgcheck(self):
        return self.zypp.gpgCheck()

    @gpgcheck.setter
    def gpgcheck(self, value):
        self.zypp.setGpgCheck(value)

    @property
    def gpgkey(self):
        return self.zypp.gpgKeyUrl().asCompleteString()

    @gpgkey.setter
    def gpgkey(self, value):
        self.zypp.setGpgKeyUrl(self._zypp_url(value))

    @property
    def keeppackages(self):
        return self.zypp.keepPackages()

    @keeppackages.setter
    def keeppackages(self, value):
        self.zypp.setKeepPackages(value)

    @property
    def metadataPath(self):
        return self.zypp.metadataPath().c_str()

    @metadataPath.setter
    def metadataPath(self, value):
        self.zypp.setMetadataPath(value)

    @property
    def mirrorlist(self):
        return self.zypp.mirrorListUrl().asCompleteString()

    @mirrorlist.setter
    def mirrorlist(self, value):
        self.zypp.setMirrorListUrl(self._zypp_url(value))
        # self._check_only_mirrorlist_or_url()

    @property
    def name(self):
        return self.zypp.name()

    @name.setter
    def name(self, value):
        self.zypp.setName(value)

    @property
    def packagesPath(self):
        return self.zypp.packagesPath().c_str()

    @packagesPath.setter
    def packagesPath(self, value):
        self.zypp.setPackagesPath(self._zypp_url(value))

    @property
    def path(self):
        return self.zypp.path().c_str()

    @path.setter
    def path(self, value):
        self.zypp.setPath(self._zypp_url(value))

    @property
    def priority(self):
        return self.zypp.priority()

    @priority.setter
    def priority(self, value):
        self.zypp.setPriority(value)

    @property
    def service(self):
        return self.zypp.service()

    @service.setter
    def service(self, value):
        self.zypp.setService(value)

    @property
    def targetdistro(self):
        return self.zypp.targetDistribution()

    @targetdistro.setter
    def targetdistro(self, value):
        self.zypp.setTargetDistribution(value)

    @property
    def type(self):
        return self.repo_types[self.zypp.type().toEnum()]

    @type.setter
    def type(self, value):
        self.zypp.setType(next(k for k, v in self.repo_types.items() if v == value))

    @property
    def url(self):
        return self.zypp.url().asCompleteString()

    @url.setter
    def url(self, value):
        self.zypp.setBaseUrl(self._zypp_url(value))
        # self._check_only_mirrorlist_or_url()
    return repos_cfg

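The new ``_get_configured_repos`` helper reads repo definitions straight from the
``.repo`` files instead of going through the libzypp bindings. A rough sketch of
the same idea, under the assumption that the files live in /etc/zypp/repos.d (the
path and file names here are illustrative):

.. code-block:: python

    import os
    import ConfigParser  # Python 2, as in the module being patched

    REPOS = '/etc/zypp/repos.d'  # assumed location of the .repo files

    def read_repo_files(repos_dir=REPOS):
        # Each .repo file is INI-formatted; ConfigParser merges them all
        # into one object whose sections are the repo aliases.
        cfg = ConfigParser.ConfigParser()
        cfg.read([os.path.join(repos_dir, fname) for fname in os.listdir(repos_dir)])
        return cfg

    for alias in read_repo_files().sections():
        print(alias)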
@_contextmanager
def _try_zypp():
def _get_repo_info(alias, repos_cfg=None):
    '''
    Convert errors like:
        'RuntimeError: [|] Repository has no alias defined.'
    into
        'ERROR: Repository has no alias defined.'.
    Get one repo's meta-data.
    '''
    try:
        yield
    except RuntimeError as e:
        raise CommandExecutionError(re.sub(r'\[.*\] ', '', str(e)))
        meta = dict((repos_cfg or _get_configured_repos()).items(alias))
        meta['alias'] = alias
        for k, v in meta.items():
            if v in ['0', '1']:
                meta[k] = int(meta[k]) == 1
            elif v == 'NONE':
                meta[k] = None
        return meta
    except Exception:
        return {}

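``_get_repo_info`` normalizes the raw INI strings it reads: '0'/'1' become booleans
and the literal string 'NONE' becomes None. A small illustration with made-up values:

.. code-block:: python

    meta = {'alias': 'repo-oss', 'enabled': '1', 'autorefresh': '0', 'type': 'NONE'}

    for k, v in meta.items():
        if v in ['0', '1']:
            meta[k] = int(meta[k]) == 1
        elif v == 'NONE':
            meta[k] = None

    # meta is now {'alias': 'repo-oss', 'enabled': True,
    #              'autorefresh': False, 'type': None}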
@_depends('zypp')
def _get_zypp_repo(repo, **kwargs):
    '''
    Get zypp._RepoInfo object by repo alias.
    '''
    with _try_zypp():
        return zypp.RepoManager().getRepositoryInfo(repo)


@_depends('zypp')
def get_repo(repo, **kwargs):
def get_repo(repo):
    '''
    Display a repo.

@ -435,14 +263,9 @@ def get_repo(repo, **kwargs):

        salt '*' pkg.get_repo alias
    '''
    try:
        r = _RepoInfo(_get_zypp_repo(repo))
    except CommandExecutionError:
        return {}
    return r.options
    return _get_repo_info(repo)

@_depends('zypp')
def list_repos():
    '''
    Lists all repos.

@ -453,15 +276,15 @@ def list_repos():

        salt '*' pkg.list_repos
    '''
    with _try_zypp():
        ret = {}
        for r in zypp.RepoManager().knownRepositories():
            ret[r.alias()] = get_repo(r.alias())
        return ret
    repos_cfg = _get_configured_repos()
    all_repos = {}
    for alias in repos_cfg.sections():
        all_repos[alias] = _get_repo_info(alias, repos_cfg=repos_cfg)

    return all_repos

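With the rewrite, ``list_repos`` is a thin loop over the parsed configuration. The
returned mapping looks roughly like this (alias and values invented for illustration):

.. code-block:: python

    {
        'repo-oss': {
            'alias': 'repo-oss',
            'baseurl': 'http://download.example.com/distribution/repo/oss/',
            'enabled': True,
            'autorefresh': True,
            'gpgcheck': True,
        },
    }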
@_depends('zypp')
def del_repo(repo, **kwargs):
def del_repo(repo):
    '''
    Delete a repo.

@ -470,26 +293,46 @@ def del_repo(repo, **kwargs):
    .. code-block:: bash

        salt '*' pkg.del_repo alias
        salt '*' pkg.del_repo alias
    '''
    r = _get_zypp_repo(repo)
    with _try_zypp():
        zypp.RepoManager().removeRepository(r)
    return 'File {1} containing repo {0!r} has been removed.\n'.format(
        repo, r.path().c_str())
    repos_cfg = _get_configured_repos()
    for alias in repos_cfg.sections():
        if alias == repo:
            cmd = ('zypper -x --non-interactive rr --loose-auth --loose-query {0}'.format(alias))
            doc = dom.parseString(__salt__['cmd.run'](cmd, output_loglevel='trace'))
            msg = doc.getElementsByTagName("message")
            if doc.getElementsByTagName("progress") and msg:
                return {
                    repo: True,
                    'message': msg[0].childNodes[0].nodeValue,
                }

    raise CommandExecutionError('Repository "{0}" not found.'.format(repo))

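The new ``del_repo`` shells out to zypper with ``-x`` (XML output) and reads the
``<message>`` node back. A standalone sketch of that parsing step, fed with a
made-up XML snippet rather than real zypper output:

.. code-block:: python

    from xml.dom import minidom as dom

    # Hypothetical XML of the shape the code above expects from `zypper -x rr`
    xml_out = ('<stream><progress id="remove-repo" name="..."/>'
               '<message type="info">Repository foo has been removed.</message>'
               '</stream>')

    doc = dom.parseString(xml_out)
    msg = doc.getElementsByTagName("message")
    if doc.getElementsByTagName("progress") and msg:
        print({'foo': True, 'message': msg[0].childNodes[0].nodeValue})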
@_depends('zypp')
def mod_repo(repo, **kwargs):
    '''
    Modify one or more values for a repo. If the repo does not exist, it will
    be created, so long as the following values are specified:

    repo
    repo or alias
        alias by which zypper refers to the repo

    url or mirrorlist
        the URL for zypper to reference

    enabled
        Enable or disable (True or False) the repository,
        but do not remove it if disabled.

    refresh
        Enable or disable (True or False) auto-refresh of the repository.

    cache
        Enable or disable (True or False) RPM file caching.

    gpgcheck
        Enable or disable (True or False) GPG check for this repository.

    Key/Value pairs may also be removed from a repo's configuration by setting
    a key to a blank value. Bear in mind that a name cannot be deleted, and a
    url can only be deleted if a mirrorlist is specified (or vice versa).

@ -499,33 +342,91 @@ def mod_repo(repo, **kwargs):
    .. code-block:: bash

        salt '*' pkg.mod_repo alias alias=new_alias
        salt '*' pkg.mod_repo alias enabled=True
        salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
    '''
    # Filter out '__pub' arguments, as well as saltenv
    repo_opts = {}
    for x in kwargs:
        if not x.startswith('__') and x not in ('saltenv',):
            repo_opts[x] = kwargs[x]

    repo_manager = zypp.RepoManager()
    try:
        r = _RepoInfo(repo_manager.getRepositoryInfo(repo))
        new_repo = False
    except RuntimeError:
        r = _RepoInfo()
        r.alias = repo
        new_repo = True
    try:
        r.options = repo_opts
    except ValueError as e:
        raise SaltInvocationError(str(e))
    with _try_zypp():
        if new_repo:
            repo_manager.addRepository(r.zypp)
        else:
            repo_manager.modifyRepository(repo, r.zypp)
    return r.options
    repos_cfg = _get_configured_repos()
    added = False

    # An attempt to add a new one?
    if repo not in repos_cfg.sections():
        url = kwargs.get("url", kwargs.get("mirrorlist"))
        if not url:
            raise CommandExecutionError(
                'Repository "{0}" not found and no URL passed to create one.'.format(repo))

        if not urlparse.urlparse(url).scheme:
            raise CommandExecutionError(
                'Repository "{0}" not found and the passed URL looks wrong.'.format(repo))

        # Does this repo already exist under a different alias?
        for alias in repos_cfg.sections():
            repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg)

            # Complete the user URL, in case it is incomplete
            new_url = urlparse.urlparse(url)
            if not new_url.path:
                new_url = urlparse.ParseResult(scheme=new_url.scheme,
                                               netloc=new_url.netloc,
                                               path='/',
                                               params=new_url.params,
                                               query=new_url.query,
                                               fragment=new_url.fragment)
            base_url = urlparse.urlparse(repo_meta["baseurl"])

            if new_url == base_url:
                raise CommandExecutionError(
                    'Repository "{0}" already exists as "{1}".'.format(repo, alias))

        # Add the new repo
        doc = None
        try:
            # Try to parse the output and find the error,
            # but this does not always work (depends on the Zypper version)
            doc = dom.parseString(__salt__['cmd.run'](("zypper -x ar {0} '{1}'".format(url, repo)),
                                                      output_loglevel='trace'))
        except Exception:
            # No XML output is available; the state of the result is still unknown.
            pass

        if doc:
            msg_nodes = doc.getElementsByTagName("message")
            if msg_nodes:
                msg_node = msg_nodes[0]
                if msg_node.getAttribute("type") == "error":
                    raise CommandExecutionError(msg_node.childNodes[0].nodeValue)

        # Verify the repository has been added
        repos_cfg = _get_configured_repos()
        if repo not in repos_cfg.sections():
            raise CommandExecutionError(
                'Failed to add new repository "{0}" for an unknown reason. Please look into the Zypper logs.'.format(repo))
        added = True

    # Modify the added or an existing repo according to the options
    cmd_opt = []

    if "enabled" in kwargs:
        cmd_opt.append(kwargs["enabled"] and "--enable" or "--disable")

    if "refresh" in kwargs:
        cmd_opt.append(kwargs["refresh"] and "--refresh" or "--no-refresh")

    if "cache" in kwargs:
        cmd_opt.append(kwargs["cache"] and "--keep-packages" or "--no-keep-packages")

    if "gpgcheck" in kwargs:
        cmd_opt.append(kwargs["gpgcheck"] and "--gpgcheck" or "--no-gpgcheck")

    if cmd_opt:
        __salt__['cmd.run'](("zypper -x mr {0} '{1}'".format(' '.join(cmd_opt), repo)),
                            output_loglevel='trace')

    # If the repo was neither added nor modified, an error should be thrown
    if not added and not cmd_opt:
        raise CommandExecutionError('Modification of the repository "{0}" was not specified.'.format(repo))

    return {}

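The kwarg-to-flag mapping above relies on the old ``x and a or b`` conditional
idiom, which behaves like ``a if x else b`` as long as ``a`` is truthy (every flag
string here is). A tiny demonstration with assumed call arguments:

.. code-block:: python

    kwargs = {'enabled': True, 'refresh': False}  # hypothetical mod_repo kwargs
    cmd_opt = []

    if "enabled" in kwargs:
        cmd_opt.append(kwargs["enabled"] and "--enable" or "--disable")
    if "refresh" in kwargs:
        cmd_opt.append(kwargs["refresh"] and "--refresh" or "--no-refresh")

    print(' '.join(cmd_opt))  # --enable --no-refresh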
def refresh_db():
@ -1085,3 +986,29 @@ def list_installed_patterns():
        salt '*' pkg.list_installed_patterns
    '''
    return _get_patterns(installed_only=True)

def search(criteria):
    '''
    List known packages available to the system.

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.search <criteria>
    '''
    doc = dom.parseString(__salt__['cmd.run'](('zypper --xmlout se {0}'.format(criteria)),
                                              output_loglevel='trace'))
    solvables = doc.getElementsByTagName("solvable")
    if not solvables:
        raise CommandExecutionError("No packages found by criteria \"{0}\".".format(criteria))

    out = {}
    for solvable in [s for s in solvables
                     if s.getAttribute("status") == "not-installed" and
                        s.getAttribute("kind") == "package"]:
        out[solvable.getAttribute("name")] = {
            'summary': solvable.getAttribute("summary")
        }
    return out

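``search`` filters the ``<solvable>`` nodes zypper returns down to not-yet-installed
packages. A self-contained sketch of that filter, using a fabricated XML snippet in
place of real ``zypper --xmlout se`` output:

.. code-block:: python

    from xml.dom import minidom as dom

    xml_out = ('<stream><search-result><solvable-list>'
               '<solvable status="not-installed" kind="package"'
               ' name="vim" summary="Vi IMproved"/>'
               '<solvable status="installed" kind="package"'
               ' name="vim-data" summary="Vim data files"/>'
               '</solvable-list></search-result></stream>')

    out = {}
    for s in dom.parseString(xml_out).getElementsByTagName("solvable"):
        if s.getAttribute("status") == "not-installed" and \
                s.getAttribute("kind") == "package":
            out[s.getAttribute("name")] = {'summary': s.getAttribute("summary")}

    print(out)  # {'vim': {'summary': 'Vi IMproved'}}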
@ -69,7 +69,7 @@ class SaltInfo(object):
            minion = self.minions[mid]

        minion.update({'grains': event_info['return']})
        logger.debug("In process minion grains update with minions={0}".format(self.minions.keys()))
        logger.debug("In process minion grains update with minions={0}".format(self.minions))
        self.publish_minions()

    def process_ret_job_event(self, event_data):
@ -157,7 +157,7 @@ class SaltInfo(object):
        if set(salt_data['data'].get('lost', [])):
            dropped_minions = set(salt_data['data'].get('lost', []))
        else:
            dropped_minions = set(self.minions.keys()) - set(salt_data['data'].get('present', []))
            dropped_minions = set(self.minions) - set(salt_data['data'].get('present', []))

        for minion in dropped_minions:
            changed = True
@ -169,9 +169,9 @@ class SaltInfo(object):
            logger.debug('got new minions')
            new_minions = set(salt_data['data'].get('new', []))
            changed = True
        elif set(salt_data['data'].get('present', [])) - set(self.minions.keys()):
        elif set(salt_data['data'].get('present', [])) - set(self.minions):
            logger.debug('detected new minions')
            new_minions = set(salt_data['data'].get('present', [])) - set(self.minions.keys())
            new_minions = set(salt_data['data'].get('present', [])) - set(self.minions)
            changed = True
        else:
            new_minions = []

@ -18,8 +18,8 @@ def output(data):
    Rather basic....
    '''
    tmp = {}
    for min_ in data.keys():
        for process in data[min_].keys():
    for min_ in data:
        for process in data[min_]:
            add = False
            if data[min_][process]['result'] is False:
                add = True

@ -232,8 +232,8 @@ def _format_host(host, data):
        # Append result counts to end of output
        colorfmt = u'{0}{1}{2[ENDC]}'
        rlabel = {True: u'Succeeded', False: u'Failed', None: u'Not Run'}
        count_max_len = max([len(str(x)) for x in rcounts.values()] or [0])
        label_max_len = max([len(x) for x in rlabel.values()] or [0])
        count_max_len = max([len(str(x)) for x in rcounts.itervalues()] or [0])
        label_max_len = max([len(x) for x in rlabel.itervalues()] or [0])
        line_max_len = label_max_len + count_max_len + 2  # +2 for ': '
        hstrs.append(
            colorfmt.format(
@ -295,7 +295,7 @@ def _format_host(host, data):
        )

        totals = u'{0}\nTotal states run: {1:>{2}}'.format('-' * line_max_len,
                                                           sum(rcounts.values()),
                                                           sum(rcounts.itervalues()),
                                                           line_max_len - 7)
        hstrs.append(colorfmt.format(colors['CYAN'], totals, colors))

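Several hunks in this commit swap ``.values()`` for ``.itervalues()``; on Python 2
the ``iter*`` variants stream over the dict instead of materializing a list first,
which matters for large result sets. A minimal sketch with made-up counts:

.. code-block:: python

    rcounts = {True: 8, False: 1, None: 2}  # hypothetical state result counts

    # Identical totals; itervalues() avoids the intermediate list (Python 2).
    assert sum(rcounts.values()) == sum(rcounts.itervalues())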
@ -349,7 +349,7 @@ def _strip_clean(returns):
    '''
    rm_tags = []
    for tag in returns:
        if not isinstance(tag, dict):
        if isinstance(tag, dict):
            continue
        if returns[tag]['result'] and not returns[tag]['changes']:
            rm_tags.append(tag)

@ -38,6 +38,6 @@ def output(data):
        if 'nics' in vm_data:
            for mac in vm_data['nics']:
                out += '  Nic - {0}:\n'.format(mac)
                out += '    Source: {0}\n'.format(vm_data['nics'][mac]['source'][vm_data['nics'][mac]['source'].keys()[0]])
                out += '    Source: {0}\n'.format(vm_data['nics'][mac]['source'][vm_data['nics'][mac]['source'].iterkeys().next()])
                out += '    Type: {0}\n'.format(vm_data['nics'][mac]['type'])
    return out

@ -95,7 +95,7 @@ class OverState(object):
        '''
        names = set()
        for comp in self.over:
            names.add(comp.keys()[0])
            names.add(comp.iterkeys().next())
        return names

    def get_stage(self, name):
@ -177,7 +177,7 @@ class OverState(object):
            if isinstance(fun_d, str):
                fun = fun_d
            elif isinstance(fun_d, dict):
                fun = fun_d.keys()[0]
                fun = fun_d.iterkeys().next()
                arg = fun_d[fun]
            else:
                yield {name: {}}
@ -212,7 +212,7 @@ class OverState(object):
            else:
                # Req has not been called
                for comp in self.over:
                    rname = comp.keys()[0]
                    rname = comp.iterkeys().next()
                    if req == rname:
                        rstage = comp[rname]
                        v_stage = self.verify_stage(rstage)
@ -263,7 +263,7 @@ class OverState(object):
        self.over_run = {}

        for comp in self.over:
            name = comp.keys()[0]
            name = comp.iterkeys().next()
            stage = comp[name]
            if name not in self.over_run:
                self.call_stage(name, stage)
@ -286,7 +286,7 @@ class OverState(object):
        self.over_run = {}
        yield self.over
        for comp in self.over:
            name = comp.keys()[0]
            name = comp.iterkeys().next()
            stage = comp[name]
            if name not in self.over_run:
                v_stage = self.verify_stage(stage)
@ -296,7 +296,7 @@ class OverState(object):
            else:
                for sret in self.call_stage(name, stage):
                    for yret in yielder(sret):
                        sname = yret.keys()[0]
                        sname = yret.iterkeys().next()
                        yield [self.get_stage(sname)]
                        final = {}
                        for minion in yret[sname]:
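The repeated ``comp.keys()[0]`` to ``comp.iterkeys().next()`` change fetches the
single key of a one-entry dict without building a whole key list first (Python 2
semantics). A minimal sketch with a made-up overstate entry:

.. code-block:: python

    comp = {'stage_one': {'match': '*'}}  # hypothetical one-key stage mapping

    # Old spelling builds a list of keys just to take the first element;
    # the new spelling pulls the first key straight off the iterator.
    assert comp.keys()[0] == comp.iterkeys().next() == 'stage_one'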
@ -94,10 +94,9 @@ class Serial(object):
            return msgpack.loads(msg, use_list=True)
        except Exception as exc:
            log.critical('Could not deserialize msgpack message: {0} '
                         'In an attempt to keep Salt running, returning an empty dict. '
                         'This often happens when trying to read a file not in binary mode. '
                         'Please open an issue and include the following error: {1}'.format(msg, exc))
            return {}
            raise

    def load(self, fn_):
        '''
@ -193,7 +192,7 @@ class SREQ(object):
        '''
        if hasattr(self, '_socket'):
            if isinstance(self.poller.sockets, dict):
                for socket in self.poller.sockets.keys():
                for socket in self.poller.sockets:
                    self.poller.unregister(socket)
            else:
                for socket in self.poller.sockets:
@ -235,7 +234,7 @@ class SREQ(object):

    def destroy(self):
        if isinstance(self.poller.sockets, dict):
            for socket in self.poller.sockets.keys():
            for socket in self.poller.sockets:
                if socket.closed is False:
                    socket.setsockopt(zmq.LINGER, 1)
                    socket.close()

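The ``isinstance(self.poller.sockets, dict)`` check exists because, depending on
the pyzmq version, ``Poller.sockets`` is either a dict mapping sockets to flags or
a list of (socket, flags) pairs. Iterating the dict form yields the sockets
themselves, so the explicit ``.keys()`` call was redundant. A hedged sketch of the
same cleanup logic (pyzmq behavior as assumed above):

.. code-block:: python

    import zmq

    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    poller = zmq.Poller()
    poller.register(sock, zmq.POLLIN)

    if isinstance(poller.sockets, dict):
        # dict form: iteration yields the registered sockets directly
        for s in list(poller.sockets):
            poller.unregister(s)
    else:
        # list form: items are (socket, flags) pairs
        for s, _flags in list(poller.sockets):
            poller.unregister(s)

    sock.close()
    ctx.term()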
@ -37,7 +37,7 @@ def merge_aggregate(obj_a, obj_b):

def merge_overwrite(obj_a, obj_b):
    for obj in obj_b:
        if obj in obj_a.keys():
        if obj in obj_a:
            obj_a[obj] = obj_b[obj]
            return obj_a
    return merge_recurse(obj_a, obj_b)

@ -282,7 +282,7 @@ class Pillar(object):
        '''
        top = collections.defaultdict(OrderedDict)
        orders = collections.defaultdict(OrderedDict)
        for ctops in tops.values():
        for ctops in tops.itervalues():
            for ctop in ctops:
                for saltenv, targets in ctop.items():
                    if saltenv == 'include':
@ -306,7 +306,7 @@ class Pillar(object):
                            if isinstance(comp, string_types):
                                states[comp] = True
                        top[saltenv][tgt] = matches
                        top[saltenv][tgt].extend(list(states.keys()))
                        top[saltenv][tgt].extend(states)
        return self.sort_top_targets(top, orders)

    def sort_top_targets(self, top, orders):
@ -316,7 +316,7 @@ class Pillar(object):
        sorted_top = collections.defaultdict(OrderedDict)
        # pylint: disable=cell-var-from-loop
        for saltenv, targets in top.items():
            sorted_targets = sorted(targets.keys(),
            sorted_targets = sorted(targets,
                                    key=lambda target: orders[saltenv][target])
            for target in sorted_targets:
                sorted_top[saltenv][target] = targets[target]

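``sorted()`` accepts any iterable, and iterating a dict yields its keys, so
``sorted(targets)`` is equivalent to ``sorted(targets.keys())`` here. A small
sketch with invented top-file targets and order values:

.. code-block:: python

    targets = {'web*': ['apache'], 'db*': ['postgres']}  # hypothetical targets
    orders = {'web*': 2, 'db*': 1}                       # hypothetical ordering

    print(sorted(targets, key=lambda target: orders[target]))  # ['db*', 'web*']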
@ -210,7 +210,7 @@ def ext_pillar(minion_id,  # pylint: disable=W0613
    name_field = model_meta['name']
    fields = model_meta['fields']

    if 'filter' in model_meta.keys():
    if 'filter' in model_meta:
        qs = (model_orm.objects
              .filter(**model_meta['filter'])
              .values(*fields))