Merge pull request #45065 from rallytime/merge-develop
[develop] Merge forward from oxygen to develop
commit 51eb07c13f
@ -10,6 +10,7 @@
driver:
  name: docker
  use_sudo: false
  hostname: salt
  privileged: true
  username: root
  volume:
@ -34,7 +35,7 @@ provisioner:
  require_chef: false
  remote_states:
    name: git://github.com/saltstack/salt-jenkins.git
    branch: master
    branch: oxygen
    repo: git
  testingdir: /testing
  salt_copy_filter:
@ -225,15 +225,16 @@ enclosing brackets ``[`` and ``]``:

Default: ``{}``

This can be used to control logging levels more specifically. The example sets
the main salt library at the 'warning' level, but sets ``salt.modules`` to log
at the ``debug`` level:
This can be used to control logging levels more specifically, based on the log
call name. The example sets the main salt library at the 'warning' level, sets
``salt.modules`` to log at the ``debug`` level, and sets a custom module to the
``all`` level:

.. code-block:: yaml

    log_granular_levels:
      'salt': 'warning'
      'salt.modules': 'debug'
      'salt.loader.saltmaster.ext.module.custom_module': 'all'

External Logging Handlers
-------------------------
@ -321,6 +321,20 @@ option on the Salt master.

    master_port: 4506

.. conf_minion:: publish_port

``publish_port``
----------------

Default: ``4505``

The port of the master publish server. This needs to coincide with the
``publish_port`` option on the Salt master.

.. code-block:: yaml

    publish_port: 4505

.. conf_minion:: source_interface_name

``source_interface_name``
@ -80,12 +80,21 @@ same way as in the above example, only without a top-level ``grains:`` key:

.. note::

    The content of ``/etc/salt/grains`` is ignored if you specify grains in the minion config.
    Grains in ``/etc/salt/grains`` are ignored if you specify the same grains in the minion config.

.. note::

    Grains are static; because they are not often changed, they require a
    grains refresh when they are updated. You can trigger one by calling:
    ``salt minion saltutil.refresh_modules``

.. note::

    You can equally configure static grains for Proxy Minions.
    As multiple Proxy Minion processes can run on the same machine, you need
    to index the files using the Minion ID, under ``/etc/salt/proxy.d/<minion ID>/grains``.
    For example, the grains for the Proxy Minion ``router1`` can be defined
    under ``/etc/salt/proxy.d/router1/grains``, while the grains for the
    Proxy Minion ``switch7`` can be put in ``/etc/salt/proxy.d/switch7/grains``.
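
For illustration, the static grains file for the ``router1`` Proxy Minion above
might contain something like this (the grain names and values are arbitrary
examples, not required names):

.. code-block:: yaml

    # /etc/salt/proxy.d/router1/grains (illustrative content)
    role: edge-router
    site: dc1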

Matching Grains in the Top File
===============================

@ -278,3 +287,9 @@ Syncing grains can be done a number of ways, they are automatically synced when
above) the grains can be manually synced and reloaded by calling the
:mod:`saltutil.sync_grains <salt.modules.saltutil.sync_grains>` or
:mod:`saltutil.sync_all <salt.modules.saltutil.sync_all>` functions.

.. note::

    When :conf_minion:`grains_cache` is set to ``False``, the grains dictionary is built
    and stored in memory on the minion. Every time the minion restarts or
    ``saltutil.refresh_grains`` is run, the grains dictionary is rebuilt from scratch.
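
A minimal sketch of the corresponding minion config, if you want to make the
in-memory behavior described above explicit (``grains_cache`` defaults to
``False``):

.. code-block:: yaml

    # /etc/salt/minion
    grains_cache: False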
@ -869,26 +869,33 @@ Example:

.. note::

    This option may have adverse effects when using the default renderer, ``yaml_jinja``.
    This is due to the fact that YAML requires proper handling in regard to special
    characters. Please see the section on :ref:`YAML ASCII support <yaml_plain_ascii>`
    in the :ref:`YAML Idiosyncrasies <yaml-idiosyncrasies>` documentation for more
    information.
    This option may have adverse effects when using the default renderer,
    ``yaml_jinja``. This is due to the fact that YAML requires proper handling
    in regard to special characters. Please see the section on :ref:`YAML ASCII
    support <yaml_plain_ascii>` in the :ref:`YAML Idiosyncrasies
    <yaml-idiosyncrasies>` documentation for more information.

.. jinja_ref:: json_decode_list
.. jinja_ref:: json_encode_list

``json_decode_list``
``json_encode_list``
--------------------

.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
    Renamed from ``json_decode_list`` to ``json_encode_list``. When you encode
    something you get bytes, and when you decode, you get your locale's
    encoding (usually a ``unicode`` type). This filter was incorrectly named
    when it was added. ``json_decode_list`` will be supported until the Neon
    release.

JSON decodes as unicode; Jinja needs bytes.
Recursively encodes all string elements of the list to bytes.

Example:

.. code-block:: jinja

    {{ [1, 2, 3] | json_decode_list }}
    {{ [1, 2, 3] | json_encode_list }}

Returns:
@ -898,25 +905,35 @@ Returns:


.. jinja_ref:: json_decode_dict
.. jinja_ref:: json_encode_dict

``json_decode_dict``
``json_encode_dict``
--------------------

.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
    Renamed from ``json_decode_dict`` to ``json_encode_dict``. When you encode
    something you get bytes, and when you decode, you get your locale's
    encoding (usually a ``unicode`` type). This filter was incorrectly named
    when it was added. ``json_decode_dict`` will be supported until the Neon
    release.

JSON decodes as unicode; Jinja needs bytes.
Recursively encodes all string items in the dictionary to bytes.

Example:

Assuming that ``pillar['foo']`` contains ``{u'a': u'\u0414'}``, and your locale
is ``en_US.UTF-8``:

.. code-block:: jinja

    {{ {'a': 'b'} | json_decode_dict }}
    {{ pillar['foo'] | json_encode_dict }}

Returns:

.. code-block:: python

    {'a': 'b'}
    {'a': '\xd0\x94'}


.. jinja_ref:: random_hash
@ -927,7 +944,8 @@ Returns:

.. versionadded:: 2017.7.0
.. versionadded:: Oxygen
    Renamed from ``rand_str`` to ``random_hash`` to more accurately describe
    what the filter does.
    what the filter does. ``rand_str`` will be supported until the Neon
    release.

Generates a random number between 1 and the number passed to the filter, and
then hashes it. The default hash type is the one specified by the minion's
@ -1612,6 +1630,54 @@ Returns:

Test supports additional optional arguments: ``ignorecase``, ``multiline``


Escape filters
--------------

.. jinja_ref:: regex_escape

``regex_escape``
----------------

.. versionadded:: 2017.7.0

Allows escaping of strings so they can be interpreted literally by another function.

Example:

.. code-block:: jinja

    regex_escape = {{ 'https://example.com?foo=bar%20baz' | regex_escape }}

will be rendered as:

.. code-block:: text

    regex_escape = https\:\/\/example\.com\?foo\=bar\%20baz

Set Theory Filters
------------------

.. jinja_ref:: unique

``unique``
----------

.. versionadded:: 2017.7.0

Performs set math using Jinja filters.

Example:

.. code-block:: jinja

    unique = {{ ['foo', 'foo', 'bar'] | unique }}

will be rendered as:

.. code-block:: text

    unique = ['foo', 'bar']

Jinja in Files
==============
@ -211,6 +211,28 @@ localtime.

This will schedule the command: ``state.sls httpd test=True`` at 5:00 PM on
Monday, Wednesday and Friday, and 3:00 PM on Tuesday and Thursday.

.. code-block:: yaml

    schedule:
      job1:
        function: state.sls
        args:
          - httpd
        kwargs:
          test: True
        when:
          - 'tea time'

.. code-block:: yaml

    whens:
      tea time: 1:40pm
      deployment time: Friday 5:00pm

The Salt scheduler also allows custom phrases to be used for the ``when``
parameter. These ``whens`` can be stored as either pillar values or
grain values.

.. code-block:: yaml

    schedule:
@ -128,6 +128,17 @@ by any master tops matches that are not matched via a top file.

To make master tops matches execute first, followed by top file matches, set
the new :conf_minion:`master_tops_first` minion config option to ``True``.
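
For example, a minimal minion config enabling this behavior:

.. code-block:: yaml

    # /etc/salt/minion
    master_tops_first: True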

Several Jinja Filters Renamed
-----------------------------

The following Jinja filters (originally added in 2017.7.0) have been renamed
because they were inaccurately named when initially added. The original names
will be supported until the Neon release of Salt.

- :jinja_ref:`rand_str` renamed to :jinja_ref:`random_hash`
- :jinja_ref:`json_decode_dict` renamed to :jinja_ref:`json_encode_dict`
- :jinja_ref:`json_decode_list` renamed to :jinja_ref:`json_encode_list`

Return Codes for Runner/Wheel Functions
---------------------------------------
@ -222,6 +233,63 @@ The new grains added are:

* ``iscsi_iqn``: Show the iSCSI IQN name for a host
* ``swap_total``: Show the configured swap_total for Linux, *BSD, OS X and Solaris/SunOS

Salt Minion Autodiscovery
-------------------------

Salt Minions no longer need to be configured with a specific DNS name or IP
address of a Master.

For this feature, the Salt Master now requires port 4520 to be open for UDP
broadcast packets, and the Salt Minion must be able to send UDP packets to
the same port.
Connection to a type instead of DNS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Previously, each Minion connected to a Master by DNS name or IP address. It is
now also possible to connect to a *type* of Master. For example, a network may
contain three different Masters, each corresponding to a particular niche,
environment, or specific role. The Minion should connect only to the Master
that is described appropriately.

To achieve this, each ``/etc/salt/master`` configuration should have a
``discovery`` option, which should have a ``mapping`` element with arbitrary
key/value pairs. The same configuration should be on the Minion, so that when
the mapping matches, the Minion recognises the Master as its connection target.

Example Master configuration (``/etc/salt/master``):

.. code-block:: yaml

    discovery:
      mapping:
        description: SES 5.0
        node: 1

The example above describes a system that is running a particular product,
where ``description`` is an arbitrary key and ``SES 5.0`` is just a string. In
order to match exactly this Master, the following configuration should be
present on the Minion:

.. code-block:: yaml

    discovery:
      match: all  # Can be "all" or "any"
      mapping:
        description: SES 5.0
        node: 1

Notice that the ``match`` criterion is set to ``all``. This means that, of all
discovered Masters, only the one whose ``description`` is set to ``SES 5.0``
*and* whose ``node`` is set to ``1`` will be selected. All other Masters will
be ignored.
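
For contrast, a Minion willing to accept any Master that shares at least one
mapping pair could relax the policy to ``any`` (illustrative values):

.. code-block:: yaml

    discovery:
      match: any  # a single matching key/value pair is sufficient
      mapping:
        description: SES 5.0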

Limitations
~~~~~~~~~~~

This feature has a couple of *temporary* limitations that are subject to change
in the future:

- Only one Master on the network is supported. The Minion currently cannot
  choose between several equivalent Masters; this will change to choosing the
  Master that is least loaded.
- Minions will accept *any* Master that matches the connection criteria,
  without any particular security applied (private/public key check, signature,
  fingerprint, etc.). This implies that administrators are expected to know
  their network and make sure it is clean.

Grains Changes
--------------
@ -244,6 +312,12 @@ New support for Cisco UCS Chassis

The salt proxy minion now allows for control of Cisco UCS chassis. See
the ``cimc`` modules for details.

New support for Cassandra v3
----------------------------

The ``cassandra_cql`` module now supports Cassandra v3, which has changed
its internal schema to define keyspaces and columns.

New salt-ssh roster
-------------------
@ -67,6 +67,13 @@ This would explicitly set the roots to the default:

    base:
      - /srv/thorium

Example ``thorium_roots`` configuration:

.. code-block:: yaml

    thorium_roots:
      base:
        - /etc/salt/thorium


The Thorium top.sls File
@ -8,6 +8,7 @@
}

/var/log/salt/minion {
        su root root
        weekly
        missingok
        rotate 7
@ -4,7 +4,7 @@ Salt package
'''

# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import warnings

# All salt related deprecation warnings should be shown once each!
@ -330,7 +330,6 @@ class Beacon(object):
            evt.fire_event({'complete': complete, 'comment': comment,
                            'beacons': self.opts['beacons']},
                           tag='/salt/minion/minion_beacon_modify_complete')

        return True

    def delete_beacon(self, name):
salt/beacons/aix_account.py (new file, 63 lines)
@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
'''
Beacon to fire an event when we notice an AIX user is locked due to many failed login attempts.

.. versionadded:: Oxygen

:depends: none
'''

# Import Python libs
from __future__ import absolute_import
import logging

log = logging.getLogger(__name__)

__virtualname__ = 'aix_account'


def __virtual__():
    '''
    Only load if kernel is AIX
    '''
    if __grains__['kernel'] == 'AIX':
        return __virtualname__

    return (False, 'The aix_account beacon module failed to load: '
                   'only available on AIX systems.')


def __validate__(config):
    '''
    Validate the beacon configuration
    '''
    # Configuration for aix_account beacon should be a dictionary
    if not isinstance(config, dict):
        return False, 'Configuration for aix_account beacon must be a dict.'
    if 'user' not in config:
        return False, ('Configuration for aix_account beacon must '
                       'include a user or ALL for all users.')
    return True, 'Valid beacon configuration'


def beacon(config):
    '''
    Checks for accounts locked due to too many invalid login attempts (3 or more).

    .. code-block:: yaml

        beacons:
          aix_account:
            user: ALL
            interval: 120

    '''

    ret = []

    user = config['user']

    locked_accounts = __salt__['shadow.login_failures'](user)
    ret.append({'accounts': locked_accounts})

    return ret
@ -145,8 +145,10 @@ class BaseCaller(object):
                print_ret = ret.get('return', {})
            salt.output.display_output(
                {'local': print_ret},
                out,
                self.opts)
                out=out,
                opts=self.opts,
                _retcode=ret.get('retcode', 0))
            # _retcode will be available in the kwargs of the outputter function
            if self.opts.get('retcode_passthrough', False):
                sys.exit(ret['retcode'])
        except SaltInvocationError as err:
@ -372,8 +374,10 @@ class RAETCaller(BaseCaller):
            self.process.terminate()
            salt.output.display_output(
                {'local': print_ret},
                ret.get('out', 'nested'),
                self.opts)
                out=ret.get('out', 'nested'),
                opts=self.opts,
                _retcode=ret.get('retcode', 0))
            # _retcode will be available in the kwargs of the outputter function
            if self.opts.get('retcode_passthrough', False):
                sys.exit(ret['retcode'])
@ -181,7 +181,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
                for full_ret in self.local_client.cmd_cli(**kwargs):
                    ret_, out, retcode = self._format_ret(full_ret)
                    ret.update(ret_)
                self._output_ret(ret, out)
                self._output_ret(ret, out, retcode=retcode)
            else:
                if self.options.verbose:
                    kwargs['verbose'] = True
@ -190,7 +190,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
                    try:
                        ret_, out, retcode = self._format_ret(full_ret)
                        retcodes.append(retcode)
                        self._output_ret(ret_, out)
                        self._output_ret(ret_, out, retcode=retcode)
                        ret.update(full_ret)
                    except KeyError:
                        errors.append(full_ret)
@ -212,7 +212,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):

        except (SaltInvocationError, EauthAuthenticationError, SaltClientError) as exc:
            ret = str(exc)
            self._output_ret(ret, '')
            self._output_ret(ret, '', retcode=1)

    def _preview_target(self):
        '''
@ -352,7 +352,7 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
                    'Requested job was still run but output cannot be displayed.\n')
        salt.output.update_progress(self.config, progress, self.progress_bar, out)

    def _output_ret(self, ret, out):
    def _output_ret(self, ret, out, retcode=0):
        '''
        Print the output from a single return to the terminal
        '''
@ -362,7 +362,10 @@ class SaltCMD(salt.utils.parsers.SaltCMDOptionParser):
            self._print_docs(ret)
        else:
            # Determine the proper output method and run it
            salt.output.display_output(ret, out, self.config)
            salt.output.display_output(ret,
                                       out=out,
                                       opts=self.config,
                                       _retcode=retcode)
        if not ret:
            sys.stderr.write('ERROR: No return received\n')
            sys.exit(2)
@ -19,7 +19,7 @@ The data structure needs to be:
# 4. How long do we wait for all of the replies?
#
# Import python libs
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
import os
import time
import random
@ -38,6 +38,7 @@ import salt.utils.files
import salt.utils.jid
import salt.utils.minions
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.versions
@ -196,7 +197,7 @@ class LocalClient(object):
                    key_user,
                    self.skip_perm_errors)
                with salt.utils.files.fopen(keyfile, 'r') as key:
                    return key.read()
                    return salt.utils.stringutils.to_unicode(key.read())
            except (OSError, IOError, SaltClientError):
                # Fall back to eauth
                return ''
@ -1795,7 +1796,7 @@ class LocalClient(object):
                                          **kwargs)

        master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \
                     ':' + str(self.opts['ret_port'])
                     ':' + six.text_type(self.opts['ret_port'])
        channel = salt.transport.Channel.factory(self.opts,
                                                 crypt='clear',
                                                 master_uri=master_uri)
@ -1903,7 +1904,7 @@ class LocalClient(object):
                                          **kwargs)

        master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \
                     ':' + str(self.opts['ret_port'])
                     ':' + six.text_type(self.opts['ret_port'])
        channel = salt.transport.client.AsyncReqChannel.factory(self.opts,
                                                                io_loop=io_loop,
                                                                crypt='clear',
@ -16,7 +16,7 @@ client applications.
'''

# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import os

# Import Salt libs
@ -4,7 +4,7 @@ A collection of mixins useful for the various *Client interfaces
'''

# Import Python libs
from __future__ import absolute_import, print_function, with_statement
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import fnmatch
import signal
import logging
@ -245,7 +245,7 @@ class SyncClientMixin(object):

    def low(self, fun, low, print_event=True, full_return=False):
        '''
        Check for deprecated usage and allow until Salt Oxygen.
        Check for deprecated usage and allow until Salt Fluorine.
        '''
        msg = []
        if 'args' in low:
@ -256,7 +256,7 @@ class SyncClientMixin(object):
            low['kwarg'] = low.pop('kwargs')

        if msg:
            salt.utils.versions.warn_until('Oxygen', ' '.join(msg))
            salt.utils.versions.warn_until('Fluorine', ' '.join(msg))

        return self._low(fun, low, print_event=print_event, full_return=full_return)

@ -395,7 +395,7 @@ class SyncClientMixin(object):
                    data['success'] = salt.utils.state.check_result(data['return']['data'])
        except (Exception, SystemExit) as ex:
            if isinstance(ex, salt.exceptions.NotImplemented):
                data['return'] = str(ex)
                data['return'] = six.text_type(ex)
            else:
                data['return'] = 'Exception occurred in {0} {1}: {2}'.format(
                    self.client,
@ -3,7 +3,7 @@
The main entry point for salt-api
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import signal
import logging
@ -2,7 +2,7 @@
'''
The client libs to communicate with the salt master when running raet
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

# Import python libs
import os
@ -3,7 +3,7 @@
Create ssh executor system
'''
# Import python libs
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
import base64
import copy
import getpass
@ -835,6 +835,7 @@ class Single(object):
            self.thin_dir = kwargs['thin_dir']
        elif self.winrm:
            saltwinshell.set_winvars(self)
            self.python_env = kwargs.get('ssh_python_env')
        else:
            if user:
                thin_dir = DEFAULT_THIN_DIR.replace('%%USER%%', user)
@ -894,6 +895,10 @@ class Single(object):
        self.serial = salt.payload.Serial(opts)
        self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
        self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
        if self.winrm:
            # Determine if Windows client is x86 or AMD64
            arch, _, _ = self.shell.exec_cmd('powershell $ENV:PROCESSOR_ARCHITECTURE')
            self.arch = arch.strip()
        self.thin = thin if thin else salt.utils.thin.thin_path(opts['cachedir'])

    def __arg_comps(self):
@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-

# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import os
import copy
import logging
@ -2,7 +2,7 @@
'''
Manage transport commands via ssh
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

# Import python libs
import re
@ -18,6 +18,8 @@ import salt.defaults.exitcodes
import salt.utils.nb_popen
import salt.utils.vt

from salt.ext import six

log = logging.getLogger(__name__)

SSH_PASSWORD_PROMPT_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:', re.M)
@ -88,7 +90,7 @@ class Shell(object):
        self.host = host.strip('[]')
        self.user = user
        self.port = port
        self.passwd = str(passwd) if passwd else passwd
        self.passwd = six.text_type(passwd) if passwd else passwd
        self.priv = priv
        self.timeout = timeout
        self.sudo = sudo
@ -8,7 +8,7 @@ helper script used by salt.client.ssh.Single. It is here, in a
separate file, for convenience of development.
'''

from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

import hashlib
import tarfile
@ -2,7 +2,7 @@
'''
Create ssh executor system
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import logging
import os
@ -16,6 +16,8 @@ from contextlib import closing
import salt.client.ssh.shell
import salt.client.ssh
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
@ -179,13 +181,13 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
        [salt.utils.url.create('_utils')],
    ]
    with salt.utils.files.fopen(lowfn, 'w+') as fp_:
        fp_.write(json.dumps(chunks))
        fp_.write(salt.utils.stringutils.to_str(json.dumps(chunks)))
    if pillar:
        with salt.utils.files.fopen(pillarfn, 'w+') as fp_:
            fp_.write(json.dumps(pillar))
            fp_.write(salt.utils.stringutils.to_str(json.dumps(pillar)))
    if roster_grains:
        with salt.utils.files.fopen(roster_grainsfn, 'w+') as fp_:
            fp_.write(json.dumps(roster_grains))
            fp_.write(salt.utils.stringutils.to_str(json.dumps(roster_grains)))

    if id_ is None:
        id_ = ''
@ -193,7 +195,7 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
        cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
    except AttributeError:
        # Minion ID should always be a str, but don't let an int break this
        cachedir = os.path.join('salt-ssh', str(id_)).rstrip(os.sep)
        cachedir = os.path.join('salt-ssh', six.text_type(id_)).rstrip(os.sep)

    for saltenv in file_refs:
        # Location where files in this saltenv will be cached
@ -241,7 +243,7 @@ def prep_trans_tar(opts, file_client, chunks, file_refs, pillar=None, id_=None,
    cwd = None
    os.chdir(gendir)
    with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
        for root, dirs, files in os.walk(gendir):
        for root, dirs, files in salt.utils.path.os_walk(gendir):
            for name in files:
                full = os.path.join(root, name)
                tfp.add(full[len(gendir):].lstrip(os.sep))
@ -7,7 +7,7 @@ as ZeroMQ salt, but via ssh.
'''

# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import json
import copy
@ -4,7 +4,7 @@ Return config information
'''

# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import re
import os
@ -3,13 +3,14 @@
Wrap the cp module allowing for managed ssh file transfers
'''
# Import Python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import logging
import os

# Import salt libs
import salt.client.ssh
import salt.utils.files
import salt.utils.stringutils
import salt.utils.templates
from salt.exceptions import CommandExecutionError

@ -141,7 +142,7 @@ def _render_filenames(path, dest, saltenv, template):
    # write out path to temp file
    tmp_path_fn = salt.utils.files.mkstemp()
    with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
        fp_.write(contents)
        fp_.write(salt.utils.stringutils.to_str(contents))
    data = salt.utils.templates.TEMPLATE_REGISTRY[template](
        tmp_path_fn,
        to_str=True,
@ -4,7 +4,7 @@ Return/control aspects of the grains data
'''

# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import collections
import copy
import math
|
||||
'''
|
||||
|
||||
# Import python libs
|
||||
from __future__ import absolute_import
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
import copy
|
||||
|
||||
# Import salt libs
|
||||
|
@ -2,7 +2,7 @@
'''
Extract the pillar data for this minion
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

# Import python libs
import collections
@ -10,7 +10,7 @@ salt-ssh calls and return the data from them.
No access control is needed because calls cannot originate from the minions.
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import copy
import logging
@ -3,21 +3,25 @@
Create ssh executor system
'''

from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
# Import python libs
import os
import time
import copy
import json
import logging

# Import salt libs
from salt.exceptions import SaltInvocationError
import salt.client.ssh.shell
import salt.client.ssh.state
import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.hashutils
import salt.utils.jid
import salt.utils.platform
import salt.utils.state
import salt.utils.thin
import salt.roster
import salt.state
@ -34,6 +38,47 @@ __func_alias__ = {
log = logging.getLogger(__name__)


def _set_retcode(ret, highstate=None):
    '''
    Set the return code based on the data back from the state system
    '''

    # Set default retcode to 0
    __context__['retcode'] = 0

    if isinstance(ret, list):
        __context__['retcode'] = 1
        return
    if not salt.utils.state.check_result(ret, highstate=highstate):
        __context__['retcode'] = 2


def _check_pillar(kwargs, pillar=None):
    '''
    Check the pillar for errors; refuse to run the state if there are errors
    in the pillar, and return the pillar errors.
    '''
    if kwargs.get('force'):
        return True
    pillar_dict = pillar if pillar is not None else __pillar__
    if '_errors' in pillar_dict:
        return False
    return True


def _wait(jid):
    '''
    Wait for all previously started state jobs to finish running
    '''
    if jid is None:
        jid = salt.utils.jid.gen_jid(__opts__)
    states = _prior_running_states(jid)
    while states:
        time.sleep(1)
        states = _prior_running_states(jid)


def _merge_extra_filerefs(*args):
    '''
    Takes a list of filerefs and returns a merged list
@ -132,12 +177,106 @@ def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
        return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))
        log.error(six.text_type(e))

    # If for some reason the json load fails, return the stdout
    return stdout


def running(concurrent=False):
    '''
    Return a list of strings that contain state return data if a state function
    is already running. This function is used to prevent multiple state calls
    from being run at the same time.

    CLI Example:

    .. code-block:: bash

        salt '*' state.running
    '''
    ret = []
    if concurrent:
        return ret
    active = __salt__['saltutil.is_running']('state.*')
    for data in active:
        err = (
            'The function "{0}" is running as PID {1} and was started at '
            '{2} with jid {3}'
        ).format(
            data['fun'],
            data['pid'],
            salt.utils.jid.jid_to_time(data['jid']),
            data['jid'],
        )
        ret.append(err)
    return ret


def _prior_running_states(jid):
    '''
    Return a list of dicts of prior calls to state functions. This function is
    used to queue state calls so only one is run at a time.
    '''

    ret = []
    active = __salt__['saltutil.is_running']('state.*')
    for data in active:
        try:
            data_jid = int(data['jid'])
        except ValueError:
            continue
        if data_jid < int(jid):
            ret.append(data)
    return ret


def _check_queue(queue, kwargs):
    '''
    Utility function to queue the state run if requested
    and to check for conflicts in currently running states
    '''
    if queue:
        _wait(kwargs.get('__pub_jid'))
    else:
        conflict = running(concurrent=kwargs.get('concurrent', False))
        if conflict:
            __context__['retcode'] = 1
            return conflict


def _get_opts(**kwargs):
    '''
    Return a copy of the opts for use, optionally load a local config on top
    '''
    opts = copy.deepcopy(__opts__)

    if 'localconfig' in kwargs:
        return salt.config.minion_config(kwargs['localconfig'], defaults=opts)

    if 'saltenv' in kwargs:
        saltenv = kwargs['saltenv']
        if saltenv is not None and not isinstance(saltenv, six.string_types):
            opts['environment'] = str(kwargs['saltenv'])
        else:
            opts['environment'] = kwargs['saltenv']

    if 'pillarenv' in kwargs:
        pillarenv = kwargs['pillarenv']
        if pillarenv is not None and not isinstance(pillarenv, six.string_types):
            opts['pillarenv'] = str(kwargs['pillarenv'])
        else:
            opts['pillarenv'] = kwargs['pillarenv']

    return opts


def _get_initial_pillar(opts):
    return __pillar__ if __opts__['__cli'] == 'salt-call' \
        and opts['pillarenv'] == __opts__['pillarenv'] \
        else None


def low(data, **kwargs):
    '''
    Execute a single low data call
@ -208,12 +347,27 @@ def low(data, **kwargs):
        return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))
        log.error(six.text_type(e))

    # If for some reason the json load fails, return the stdout
    return stdout


def _get_test_value(test=None, **kwargs):
    '''
    Determine the correct value for the test flag.
    '''
    ret = True
    if test is None:
        if salt.utils.args.test_mode(test=test, **kwargs):
            ret = True
        else:
            ret = __opts__.get('test', None)
    else:
        ret = test
    return ret


def high(data, **kwargs):
    '''
    Execute the compound calls stored in a single set of high data
@ -281,7 +435,7 @@ def high(data, **kwargs):
        return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))
        log.error(six.text_type(e))

    # If for some reason the json load fails, return the stdout
    return stdout
@ -367,6 +521,8 @@ def check_request(name=None):
    serial = salt.payload.Serial(__opts__)
    if os.path.isfile(notify_path):
        with salt.utils.files.fopen(notify_path, 'rb') as fp_:
            # Not sure if this needs to be decoded since it is being returned,
            # and msgpack serialization will encode it to bytes anyway.
            req = serial.load(fp_)
        if name:
            return req[name]
@ -522,7 +678,7 @@ def highstate(test=None, **kwargs):
        return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))
        log.error(six.text_type(e))

    # If for some reason the json load fails, return the stdout
    return stdout
@ -602,7 +758,7 @@ def top(topfn, test=None, **kwargs):
        return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))
        log.error(six.text_type(e))

    # If for some reason the json load fails, return the stdout
    return stdout
@ -646,6 +802,99 @@ def show_lowstate():
    return st_.compile_low_chunks()


def sls_id(id_, mods, test=None, queue=False, **kwargs):
    '''
    Call a single ID from the named module(s) and handle all requisites

    The state ID comes *before* the module ID(s) on the command line.

    id
        ID to call

    mods
        Comma-delimited list of modules to search for the given id and its requisites

    .. versionadded:: 2017.7.3

    saltenv : base
        Specify a salt fileserver environment to be used when applying states

    pillarenv
        Specify a Pillar environment to be used when applying states. This
        can also be set in the minion config file using the
        :conf_minion:`pillarenv` option. When neither the
        :conf_minion:`pillarenv` minion config option nor this CLI argument is
        used, all Pillar environments will be merged together.

    CLI Example:

    .. code-block:: bash

        salt '*' state.sls_id my_state my_module

        salt '*' state.sls_id my_state my_module,a_common_module
    '''
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    orig_test = __opts__.get('test', None)
    opts = _get_opts(**kwargs)
    opts['test'] = _get_test_value(test, **kwargs)

    # Since this is running a specific ID within a specific SLS file, fall back
    # to the 'base' saltenv if none is configured and none was passed.
    if opts['environment'] is None:
        opts['environment'] = 'base'

    try:
        st_ = salt.state.HighState(opts,
                                   proxy=__proxy__,
                                   initial_pillar=_get_initial_pillar(opts))
    except NameError:
        st_ = salt.state.HighState(opts,
                                   initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
        __context__['retcode'] = 5
        err = ['Pillar failed to render with the following messages:']
        err += __pillar__['_errors']
        return err

    if isinstance(mods, six.string_types):
        split_mods = mods.split(',')
    else:
        split_mods = mods
    st_.push_active()
    try:
        high_, errors = st_.render_highstate({opts['environment']: split_mods})
    finally:
        st_.pop_active()
    errors += st_.state.verify_high(high_)
    # Apply requisites to high data
    high_, req_in_errors = st_.state.requisite_in(high_)
    if req_in_errors:
        # This if statement should not be necessary if there were no errors,
        # but it is required to get the unit tests to pass.
        errors.extend(req_in_errors)
    if errors:
        __context__['retcode'] = 1
        return errors
    chunks = st_.state.compile_high_data(high_)
    ret = {}
    for chunk in chunks:
        if chunk.get('__id__', '') == id_:
            ret.update(st_.state.call_chunk(chunk, {}, chunks))

    _set_retcode(ret, highstate=high_)
    # Work around Windows multiprocessing bug, set __opts__['test'] back to
    # value from before this function was run.
    __opts__['test'] = orig_test
    if not ret:
        raise SaltInvocationError(
            'No matches for ID \'{0}\' found in SLS \'{1}\' within saltenv '
            '\'{2}\''.format(id_, mods, opts['environment'])
        )
    return ret


def show_sls(mods, saltenv='base', test=None, **kwargs):
    '''
    Display the state data from a specific sls or list of sls files on the
@ -872,7 +1121,7 @@ def single(fun, name, test=None, **kwargs):
        return json.loads(stdout, object_hook=salt.utils.data.decode_dict)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(str(e))
        log.error(six.text_type(e))

    # If for some reason the json load fails, return the stdout
    return stdout
@ -36,6 +36,7 @@ import salt.utils.crypt
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.verify
import salt.syspaths
from salt.template import compile_template

@ -185,6 +186,10 @@ class CloudClient(object):
        else:
            self.opts = salt.config.cloud_config(path)

        # Check the cache-dir exists. If not, create it.
        v_dirs = [self.opts['cachedir']]
        salt.utils.verify.verify_env(v_dirs, salt.utils.get_user())

        if pillars:
            for name, provider in six.iteritems(pillars.pop('providers', {})):
                driver = provider['driver']
@ -824,7 +824,7 @@ def query(params=None):

    content = request.text

    result = json.loads(content, object_hook=salt.utils.data.decode_dict)
    result = json.loads(content, object_hook=salt.utils.data.encode_dict)
    if 'Code' in result:
        raise SaltCloudSystemExit(
            pprint.pformat(result.get('Message', {}))
@ -49,6 +49,7 @@ Example Provider Configuration

# Import python libs
from __future__ import absolute_import
import os
import sys
import re
import pprint
import logging
@ -58,6 +59,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion

# Import 3rd-party libs
# pylint: disable=import-error
LIBCLOUD_IMPORT_ERROR = None
try:
    import libcloud
    from libcloud.compute.types import Provider
@ -78,6 +80,7 @@ try:
        libcloud.security.CA_CERTS_PATH.append('/etc/ssl/certs/YaST-CA.pem')
    HAS_LIBCLOUD = True
except ImportError:
    LIBCLOUD_IMPORT_ERROR = sys.exc_info()
    HAS_LIBCLOUD = False
# pylint: enable=import-error

@ -155,6 +158,9 @@ def get_dependencies():
    '''
    Warn if dependencies aren't met.
    '''
    if LIBCLOUD_IMPORT_ERROR:
        log.error("Failure when importing LibCloud: ", exc_info=LIBCLOUD_IMPORT_ERROR)
        log.error("Note: The libcloud dependency is called 'apache-libcloud' on PyPi/pip.")
    return config.check_driver_dependencies(
        __virtualname__,
        {'libcloud': HAS_LIBCLOUD}
@ -188,7 +188,7 @@ def query(params=None):
    log.debug(request.url)

    content = request.text
    result = json.loads(content, object_hook=salt.utils.data.decode_dict)
    result = json.loads(content, object_hook=salt.utils.data.encode_dict)

    # print('response:')
    # pprint.pprint(result)
@ -48,6 +48,10 @@ log = logging.getLogger(__name__)
# The name salt will identify the lib by
__virtualname__ = 'virtualbox'

# If no clone mode is specified in the virtualbox profile,
# then default to 0, which was the old default value
DEFAULT_CLONE_MODE = 0


def __virtual__():
    '''
@ -85,6 +89,30 @@ def get_configured_provider():
    return configured


def map_clonemode(vm_info):
    """
    Convert the virtualbox config file values for clone_mode into the integers the API requires
    """
    mode_map = {
        'state': 0,
        'child': 1,
        'all': 2
    }

    if not vm_info:
        return DEFAULT_CLONE_MODE

    if 'clonemode' not in vm_info:
        return DEFAULT_CLONE_MODE

    if vm_info['clonemode'] in mode_map:
        return mode_map[vm_info['clonemode']]
    else:
        raise SaltCloudSystemExit(
            "Illegal clonemode for virtualbox profile. Legal values are: {}".format(','.join(mode_map.keys()))
        )


def create(vm_info):
    """
    Creates a virtual machine from the given VM information.
@ -102,6 +130,7 @@ def create(vm_info):
        profile: <dict>
        driver: <provider>:<profile>
        clonefrom: <vm_name>
        clonemode: <mode> (default: state, choices: state, child, all)
    }
    @type vm_info dict
    @return dict of resulting vm. !!!Passwords can and should be included!!!
@ -133,6 +162,9 @@ def create(vm_info):
    key_filename = config.get_cloud_config_value(
        'private_key', vm_info, __opts__, search_global=False, default=None
    )
    clone_mode = map_clonemode(vm_info)
    wait_for_pattern = vm_info['waitforpattern'] if 'waitforpattern' in vm_info.keys() else None
    interface_index = vm_info['interfaceindex'] if 'interfaceindex' in vm_info.keys() else 0

    log.debug("Going to fire event: starting create")
    __utils__['cloud.fire_event'](
@ -147,7 +179,8 @@ def create(vm_info):
    # to create the virtual machine.
    request_kwargs = {
        'name': vm_info['name'],
        'clone_from': vm_info['clonefrom']
        'clone_from': vm_info['clonefrom'],
        'clone_mode': clone_mode
    }

    __utils__['cloud.fire_event'](
@ -163,17 +196,17 @@ def create(vm_info):
    # Booting and deploying if needed
    if power:
        vb_start_vm(vm_name, timeout=boot_timeout)
        ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name)
        ips = vb_wait_for_network_address(wait_for_ip_timeout, machine_name=vm_name, wait_for_pattern=wait_for_pattern)

        if len(ips):
            ip = ips[0]
            ip = ips[interface_index]
            log.info("[ {0} ] IPv4 is: {1}".format(vm_name, ip))
            # ssh or smb using ip and install salt only if deploy is True
            if deploy:
                vm_info['key_filename'] = key_filename
                vm_info['ssh_host'] = ip

                res = __utils__['cloud.bootstrap'](vm_info)
                res = __utils__['cloud.bootstrap'](vm_info, __opts__)
                vm_result.update(res)

    __utils__['cloud.fire_event'](
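
A minimal, illustrative cloud profile exercising the new ``clonemode`` option
might look like the following; the profile and provider names here are
hypothetical:

.. code-block:: yaml

    vbox-test-vm:
      provider: my-virtualbox-provider
      clonefrom: template-vm
      clonemode: child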
@ -1156,6 +1156,30 @@ VALID_OPTS = {

    # Used by salt.modules.dockermod.compare_container_networks to specify which keys are compared
    'docker.compare_container_networks': dict,

    # SSDP discovery publisher description.
    # Contains publisher configuration and minion mapping.
    # Setting it to False disables discovery
    'discovery': (dict, bool),

    # SSDP discovery mapping
    # Defines arbitrary data for description and grouping minions across various types of masters,
    # especially when masters are not related to each other.
    'mapping': dict,

    # SSDP discovery mapping matcher policy
    # Values: "any", where at least one key/value pair must match, or
    # "all", where every key/value pair must be identical
    'match': str,

    # Port definition.
    'port': int,

    # Number of attempts to send an SSDP discovery query
    'attempts': int,

    # Pause between SSDP discovery attempts
    'pause': int,
}

# default configurations
@ -1440,6 +1464,13 @@ DEFAULT_MINION_OPTS = {
        'automatic': ['IPAddress', 'Gateway',
                      'GlobalIPv6Address', 'IPv6Gateway'],
    },
    'discovery': {
        'attempts': 3,
        'pause': 5,
        'port': 4520,
        'match': 'any',
        'mapping': {},
    },
}

DEFAULT_MASTER_OPTS = {
@ -1758,6 +1789,10 @@ DEFAULT_MASTER_OPTS = {
    'salt_cp_chunk_size': 98304,
    'require_minion_sign_messages': False,
    'drop_messages_signature_fail': False,
    'discovery': {
        'port': 4520,
        'mapping': {},
    },
}
@ -5,6 +5,8 @@ masters, encrypting and decrypting payloads, preparing messages, and
authenticating peers
'''
# Import python libs
# NOTE: We can't use unicode_literals because this module implicitly uses
# the Array class, which has incompatibilities with it.
from __future__ import absolute_import, print_function
import os
import sys
@ -171,7 +173,7 @@ def _get_rsa_key(path, passphrase):
    retrieve the key from disk.
    '''
    log.debug('salt.crypt._get_rsa_key: Loading private key')
    return _get_key_with_evict(path, str(os.path.getmtime(path)), passphrase)
    return _get_key_with_evict(path, six.text_type(os.path.getmtime(path)), passphrase)


def sign_message(privkey_path, message, passphrase=None):
@ -62,7 +62,7 @@ def jobber_check(self):
            rms.append(jid)
            data = self.shells.value[jid]
            stdout, stderr = data['proc'].communicate()
            ret = json.loads(salt.utils.stringutils.to_str(stdout), object_hook=salt.utils.data.decode_dict)['local']
            ret = json.loads(salt.utils.stringutils.to_str(stdout), object_hook=salt.utils.data.encode_dict)['local']
            route = {'src': (self.stack.value.local.name, 'manor', 'jid_ret'),
                     'dst': (data['msg']['route']['src'][0], None, 'remote_cmd')}
            ret['cmd'] = '_return'
@ -38,6 +38,7 @@ import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.minions
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.user
@ -144,7 +145,7 @@ def clean_pub_auth(opts):
    if not os.path.exists(auth_cache):
        return
    else:
        for (dirpath, dirnames, filenames) in os.walk(auth_cache):
        for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(auth_cache):
            for auth_file in filenames:
                auth_file_path = os.path.join(dirpath, auth_file)
                if not os.path.isfile(auth_file_path):
@ -334,7 +335,7 @@ class AutoKey(object):
        expire_minutes = self.opts.get('autosign_timeout', 120)
        if expire_minutes > 0:
            min_time = time.time() - (60 * int(expire_minutes))
            for root, dirs, filenames in os.walk(autosign_dir):
            for root, dirs, filenames in salt.utils.path.os_walk(autosign_dir):
                for f in filenames:
                    stub_file = os.path.join(autosign_dir, f)
                    mtime = os.path.getmtime(stub_file)
@ -54,7 +54,7 @@ class SaltException(Exception):
            # Some non-string input was passed. Run the parent dunder init with
            # a str version, and convert the passed value to unicode for the
            # message/strerror attributes.
            super(SaltException, self).__init__(str(message))
            super(SaltException, self).__init__(str(message))  # future lint: blacklisted-function
            self.message = self.strerror = unicode(message)  # pylint: disable=incompatible-py3-code

    def __unicode__(self):
@ -66,8 +66,7 @@ class SaltException(Exception):
        transport via msgpack
        '''
        if six.PY3:
            # The message should be a str type, not a unicode
            return {'message': str(self), 'args': self.args}
            return {'message': six.text_type(self), 'args': self.args}
        return dict(message=self.__unicode__(), args=self.args)
@ -10,6 +10,7 @@ import socket
import ctypes
import os
import ipaddress
import salt.ext.six as six


class sockaddr(ctypes.Structure):
@ -36,7 +37,7 @@ def inet_pton(address_family, ip_string):
    # This will catch IP Addresses such as 10.1.2
    if address_family == socket.AF_INET:
        try:
            ipaddress.ip_address(ip_string.decode())
            ipaddress.ip_address(six.u(ip_string))
        except ValueError:
            raise socket.error('illegal IP address string passed to inet_pton')
        return socket.inet_aton(ip_string)
@ -2,7 +2,7 @@
'''
Classes that manage file clients
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

# Import python libs
import contextlib
@ -124,7 +124,7 @@ class Client(object):

        filelist = set()

        for root, dirs, files in os.walk(destdir, followlinks=True):
        for root, dirs, files in salt.utils.path.os_walk(destdir, followlinks=True):
            for name in files:
                path = os.path.join(root, name)
                filelist.add(path)
@ -359,7 +359,7 @@ class Client(object):
            )
            return states
        for path in self.opts['file_roots'][saltenv]:
            for root, dirs, files in os.walk(path, topdown=True):
            for root, dirs, files in salt.utils.path.os_walk(path, topdown=True):
                log.debug(
                    'Searching for states in dirs %s and files %s',
                    dirs, files
@ -504,7 +504,7 @@ class Client(object):
                'Path \'{0}\' is not absolute'.format(url_path)
            )
        if dest is None:
            with salt.utils.files.fopen(url_path, 'r') as fp_:
            with salt.utils.files.fopen(url_path, 'rb') as fp_:
                data = fp_.read()
            return data
        return url_path
@ -512,7 +512,7 @@ class Client(object):
        if url_scheme == 'salt':
            result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir)
            if result and dest is None:
                with salt.utils.files.fopen(result, 'r') as fp_:
                with salt.utils.files.fopen(result, 'rb') as fp_:
                    data = fp_.read()
                return data
            return result
@ -893,7 +893,7 @@ class LocalClient(Client):
            return ret
        prefix = prefix.strip('/')
        for path in self.opts['file_roots'][saltenv]:
            for root, dirs, files in os.walk(
            for root, dirs, files in salt.utils.path.os_walk(
                os.path.join(path, prefix), followlinks=True
            ):
                # Don't walk any directories that match file_ignore_regex or glob
@ -913,7 +913,7 @@ class LocalClient(Client):
        if saltenv not in self.opts['file_roots']:
            return ret
        for path in self.opts['file_roots'][saltenv]:
            for root, dirs, files in os.walk(
            for root, dirs, files in salt.utils.path.os_walk(
                os.path.join(path, prefix), followlinks=True
            ):
                # Don't walk any directories that match file_ignore_regex or glob
@ -932,7 +932,7 @@ class LocalClient(Client):
            return ret
        prefix = prefix.strip('/')
        for path in self.opts['file_roots'][saltenv]:
            for root, dirs, files in os.walk(
            for root, dirs, files in salt.utils.path.os_walk(
                os.path.join(path, prefix), followlinks=True
            ):
                ret.append(sdecode(os.path.relpath(root, path)))
@ -1232,7 +1232,7 @@ class RemoteClient(Client):
                data_type = type(data).__name__
            except AttributeError:
                # Shouldn't happen, but don't let this cause a traceback.
                data_type = str(type(data))
                data_type = six.text_type(type(data))
            transport_tries += 1
            log.warning(
                'Data transport is broken, got: %s, type: %s, '
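Most hunks in this merge apply the same mechanical substitution: ``os.walk`` becomes ``salt.utils.path.os_walk``, a wrapper that, by the convention of this merge, yields unicode paths on Python 2 as well. A hypothetical stand-in with that behavior, assuming only stdlib semantics, would look like:

.. code-block:: python

    import os

    def os_walk(top, *args, **kwargs):
        # Illustrative stand-in for salt.utils.path.os_walk: force the top
        # directory to unicode so os.walk() yields unicode paths on Python 2.
        return os.walk(u'%s' % top, *args, **kwargs)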
@ -17,6 +17,7 @@ import time
import salt.loader
import salt.utils.files
import salt.utils.locales
import salt.utils.path
import salt.utils.url
import salt.utils.versions
from salt.utils.args import get_function_argspec as _argspec
@ -182,7 +183,7 @@ def generate_mtime_map(opts, path_map):
    file_map = {}
    for saltenv, path_list in six.iteritems(path_map):
        for path in path_list:
            for directory, dirnames, filenames in os.walk(path):
            for directory, dirnames, filenames in salt.utils.path.os_walk(path):
                # Don't walk any directories that match file_ignore_regex or glob
                dirnames[:] = [d for d in dirnames if not is_file_ignored(opts, d)]
                for item in filenames:
@ -225,7 +226,7 @@ def reap_fileserver_cache_dir(cache_base, find_func):
    '''
    for saltenv in os.listdir(cache_base):
        env_base = os.path.join(cache_base, saltenv)
        for root, dirs, files in os.walk(env_base):
        for root, dirs, files in salt.utils.path.os_walk(env_base):
            # if we have an empty directory, lets cleanup
            # This will only remove the directory on the second time
            # "_reap_cache" is called (which is intentional)
@ -207,7 +207,7 @@ def update():
    # Walk the cache directory searching for deletions
    blob_names = [blob.name for blob in blob_list]
    blob_set = set(blob_names)
    for root, dirs, files in os.walk(path):
    for root, dirs, files in salt.utils.path.os_walk(path):
        for f in files:
            fname = os.path.join(root, f)
            relpath = os.path.relpath(fname, path)
@ -34,6 +34,7 @@ import salt.fileserver
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.path
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
@ -275,7 +276,7 @@ def file_list(load):
            continue
        walk_dir = os.path.join(minion_files_dir, prefix)
        # Do not follow links for security reasons
        for root, _, files in os.walk(walk_dir, followlinks=False):
        for root, _, files in salt.utils.path.os_walk(walk_dir, followlinks=False):
            for fname in files:
                # Ignore links for security reasons
                if os.path.islink(os.path.join(root, fname)):
@ -354,7 +355,7 @@ def dir_list(load):
            continue
        walk_dir = os.path.join(minion_files_dir, prefix)
        # Do not follow links for security reasons
        for root, _, _ in os.walk(walk_dir, followlinks=False):
        for root, _, _ in salt.utils.path.os_walk(walk_dir, followlinks=False):
            relpath = os.path.relpath(root, minion_files_dir)
            # Ensure that the current directory and directories outside of
            # the minion dir do not end up in return list
@ -386,8 +386,8 @@ def _file_lists(load, form):
                ret['links'][rel_path] = link_dest

        for path in __opts__['file_roots'][load['saltenv']]:
            for root, dirs, files in os.walk(
                    salt.utils.stringutils.to_unicode(path),
            for root, dirs, files in salt.utils.path.os_walk(
                    path,
                    followlinks=__opts__['fileserver_followsymlinks']):
                _add_to(ret['dirs'], path, root, dirs)
                _add_to(ret['files'], path, root, files)
@ -58,6 +58,7 @@ import salt.utils.data
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.path
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
@ -754,7 +755,7 @@ def _file_lists(load, form):
            # svnfs root (global or per-remote) does not exist in env
            continue

        for root, dirs, files in os.walk(env_root):
        for root, dirs, files in salt.utils.path.os_walk(env_root):
            relpath = os.path.relpath(root, env_root)
            dir_rel_fn = os.path.join(repo['mountpoint'], relpath)
            if relpath != '.':
@ -425,14 +425,14 @@ def _osx_memdata():
    sysctl = salt.utils.path.which('sysctl')
    if sysctl:
        mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
        swap_total = __salt__['cmd.run']('{0} -n vm.swapusage').split()[2]
        swap_total = __salt__['cmd.run']('{0} -n vm.swapusage'.format(sysctl)).split()[2]
        if swap_total.endswith('K'):
            _power = 2**10
        elif swap_total.endswith('M'):
            _power = 2**20
        elif swap_total.endswith('G'):
            _power = 2**30
        swap_total = swap_total[:-1] * _power
        swap_total = float(swap_total[:-1]) * _power

        grains['mem_total'] = int(mem) // 1024 // 1024
        grains['swap_total'] = int(swap_total) // 1024 // 1024
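The ``float()`` cast here fixes a real bug: ``swap_total[:-1]`` is a string, and multiplying a string by an integer repeats it rather than scaling it. A quick demonstration of the corrected arithmetic, using a sample value of the form ``vm.swapusage`` reports:

.. code-block:: python

    # 'vm.swapusage' reports fields such as '1024.00M'
    swap_total = '1024.00M'
    _power = 2 ** 20                           # 'M' suffix
    scaled = float(swap_total[:-1]) * _power   # 1073741824.0
    print(int(scaled) // 1024 // 1024)         # 1024 (MiB)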
@ -13,6 +13,7 @@ import logging
import salt.utils.files
import salt.utils.platform

__proxyenabled__ = ['*']
log = logging.getLogger(__name__)


@ -39,16 +40,33 @@ def config():
    if 'conf_file' not in __opts__:
        return {}
    if os.path.isdir(__opts__['conf_file']):
        gfn = os.path.join(
            __opts__['conf_file'],
            'grains'
        )
        if salt.utils.platform.is_proxy():
            gfn = os.path.join(
                __opts__['conf_file'],
                'proxy.d',
                __opts__['id'],
                'grains'
            )
        else:
            gfn = os.path.join(
                __opts__['conf_file'],
                'grains'
            )
    else:
        gfn = os.path.join(
            os.path.dirname(__opts__['conf_file']),
            'grains'
        )
        if salt.utils.platform.is_proxy():
            gfn = os.path.join(
                os.path.dirname(__opts__['conf_file']),
                'proxy.d',
                __opts__['id'],
                'grains'
            )
        else:
            gfn = os.path.join(
                os.path.dirname(__opts__['conf_file']),
                'grains'
            )
    if os.path.isfile(gfn):
        log.debug('Loading static grains from %s', gfn)
        with salt.utils.files.fopen(gfn, 'rb') as fp_:
            try:
                return yaml.safe_load(fp_.read())
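The branching above reduces to one path-selection rule: proxy minions get a per-minion-ID subdirectory under ``proxy.d`` so that several proxy processes can share one machine. A compact sketch of the same lookup (function name illustrative, not Salt API):

.. code-block:: python

    import os

    def static_grains_path(conf_file, minion_id, is_proxy):
        # Proxy minions read grains from proxy.d/<minion ID>/grains;
        # regular minions read a single 'grains' file next to the config.
        base = conf_file if os.path.isdir(conf_file) else os.path.dirname(conf_file)
        if is_proxy:
            return os.path.join(base, 'proxy.d', minion_id, 'grains')
        return os.path.join(base, 'grains')

    # static_grains_path('/etc/salt/proxy', 'router1', True)
    # -> '/etc/salt/proxy.d/router1/grains'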
salt/key.py
@ -5,7 +5,7 @@ used to manage salt keys directly without interfacing with the CLI.
'''

# Import python libs
from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals
import os
import copy
import json
@ -30,6 +30,7 @@ import salt.utils.files
import salt.utils.kinds
import salt.utils.master
import salt.utils.sdb
import salt.utils.stringutils
import salt.utils.user

# pylint: disable=import-error,no-name-in-module,redefined-builtin
@ -125,7 +126,8 @@ class KeyCLI(object):
        if 'token' in self.opts:
            try:
                with salt.utils.files.fopen(os.path.join(self.opts['key_dir'], '.root_key'), 'r') as fp_:
                    low['key'] = fp_.readline()
                    low['key'] = \
                        salt.utils.stringutils.to_unicode(fp_.readline())
            except IOError:
                low['token'] = self.opts['token']
        #
@ -606,7 +608,9 @@ class Key(object):
                for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)):
                    if not fn_.startswith('.'):
                        if os.path.isfile(os.path.join(dir_, fn_)):
                            ret[os.path.basename(dir_)].append(fn_)
                            ret[os.path.basename(dir_)].append(
                                salt.utils.stringutils.to_unicode(fn_)
                            )
            except (OSError, IOError):
                # key dir kind is not created yet, just skip
                continue
@ -664,7 +668,8 @@ class Key(object):
            for key in salt.utils.data.sorted_ignorecase(keys):
                path = os.path.join(self.opts['pki_dir'], status, key)
                with salt.utils.files.fopen(path, 'r') as fp_:
                    ret[status][key] = fp_.read()
                    ret[status][key] = \
                        salt.utils.stringutils.to_unicode(fp_.read())
        return ret

    def key_str_all(self):
@ -677,7 +682,8 @@ class Key(object):
            for key in salt.utils.data.sorted_ignorecase(keys):
                path = os.path.join(self.opts['pki_dir'], status, key)
                with salt.utils.files.fopen(path, 'r') as fp_:
                    ret[status][key] = fp_.read()
                    ret[status][key] = \
                        salt.utils.stringutils.to_unicode(fp_.read())
        return ret

    def accept(self, match=None, match_dict=None, include_rejected=False, include_denied=False):
@ -1026,11 +1032,14 @@ class RaetKey(Key):
                continue
            path = os.path.join(road_cache, road)
            with salt.utils.files.fopen(path, 'rb') as fp_:
                # Do not use to_unicode to decode this. It needs to stay as
                # bytes to be deserialized.
                if ext == '.json':
                    data = json.load(fp_)
                elif ext == '.msgpack':
                    data = msgpack.load(fp_)
                if data['role'] not in minions:
                role = salt.utils.stringutils.to_unicode(data['role'])
                if role not in minions:
                    os.remove(path)

    def gen_keys(self, keydir=None, keyname=None, keysize=None, user=None):
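The recurring fix in this file is to decode key material to unicode at the point it is read. A hypothetical stand-in for ``salt.utils.stringutils.to_unicode`` (assumed here for illustration; the real helper lives in the imports added above) behaves roughly like:

.. code-block:: python

    def to_unicode(data, encoding='utf-8'):
        # Decode bytes to unicode; pass already-decoded text through untouched.
        if isinstance(data, bytes):
            return data.decode(encoding)
        return data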
@ -6,7 +6,7 @@ plugin interfaces used by Salt.
'''

# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals
import os
import sys
import time
@ -989,7 +989,7 @@ def _generate_module(name):

    code = u"'''Salt loaded {0} parent module'''".format(name.split('.')[-1])
    # ModuleType can't accept a unicode type on PY2
    module = types.ModuleType(str(name))
    module = types.ModuleType(str(name))  # future lint: disable=blacklisted-function
    exec(code, module.__dict__)
    sys.modules[name] = module

@ -1436,7 +1436,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
        except IOError:
            raise
        except ImportError as exc:
            if 'magic number' in str(exc):
            if 'magic number' in six.text_type(exc):
                error_msg = 'Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.'.format(self.tag, name)
                log.warning(error_msg)
                self.missing_modules[name] = error_msg
@ -5,7 +5,7 @@ involves preparing the three listeners and the workers needed by the master.
'''

# Import python libs
from __future__ import absolute_import, with_statement
from __future__ import absolute_import, with_statement, unicode_literals
import copy
import ctypes
import os
@ -80,7 +80,9 @@ import salt.utils.schedule
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
import salt.utils.ssdp
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.config import DEFAULT_MASTER_OPTS
from salt.exceptions import FileserverConfigError
from salt.transport import iter_transport_opts
from salt.utils.debug import (
@ -604,6 +606,18 @@ class Master(SMaster):
                kwargs=kwargs,
                name='ReqServer')

            # Fire up SSDP discovery publisher
            if self.opts[u'discovery']:
                if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
                    self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
                        port=self.opts[u'discovery'].get(u'port', DEFAULT_MASTER_OPTS[u'discovery'][u'port']),
                        listen_ip=self.opts[u'interface'],
                        answer={u'mapping': self.opts[u'discovery'].get(u'mapping', {})}).run)
                else:
                    log.error(u'Unable to load SSDP: asynchronous IO is not available.')
                    if sys.version_info.major == 2:
                        log.error(u'You are using Python 2, please install "trollius" module to enable SSDP discovery.')

            # Install the SIGINT/SIGTERM handlers if not done so far
            if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
                # No custom signal handling was added, install our own
@ -1529,7 +1543,7 @@ class AESFuncs(object):
            'publish_auth')
        if not os.path.isdir(auth_cache):
            os.makedirs(auth_cache)
        jid_fn = os.path.join(auth_cache, str(load['jid']))
        jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
        with salt.utils.files.fopen(jid_fn, 'r') as fp_:
            if not load['id'] == fp_.read():
                return {}
@ -1758,8 +1772,8 @@ class ClearFuncs(object):
        except Exception as exc:
            log.error('Exception occurred while introspecting %s: %s', fun, exc)
            return {'error': {'name': exc.__class__.__name__,
                              'args': exc.args,
                              'message': str(exc)}}
                              'args': exc.args,
                              'message': six.text_type(exc)}}

    def wheel(self, clear_load):
        '''
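The SSDP publisher only consults two keys under ``opts['discovery']``. A sketch of the structure it reads, with illustrative values (the key names come from the hunk above; the port value is a placeholder, the real default lives in ``DEFAULT_MASTER_OPTS``, which this merge does not show):

.. code-block:: python

    opts = {
        'interface': '0.0.0.0',
        'discovery': {
            'port': 4520,      # placeholder; falls back to DEFAULT_MASTER_OPTS['discovery']['port']
            'mapping': {       # advertised to minions in the SSDP answer
                'description': 'primary master',
            },
        },
    }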
salt/minion.py
@ -3,7 +3,7 @@
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import os
import re
import sys
@ -105,6 +105,7 @@ import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
@ -482,8 +483,13 @@ class MinionBase(object):
            log.warning('Master is set to disable, skipping connection')
            self.connected = False
            raise tornado.gen.Return((None, None))

        # Run masters discovery over SSDP. This may modify the whole configuration,
        # depending on the networking and sets of masters.
        self._discover_masters()

        # check if master_type was altered from its default
        elif opts['master_type'] != 'str' and opts['__role'] != 'syndic':
        if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
            # check for a valid keyword
            if opts['master_type'] == 'func':
                eval_master_func(opts)
@ -705,6 +711,43 @@ class MinionBase(object):
            self.connected = False
            raise exc

    def _discover_masters(self):
        '''
        Discover master(s) and decide where to connect, if SSDP is around.
        This modifies the configuration on the fly.
        :return:
        '''
        if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
            master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
            masters = {}
            for att in range(self.opts['discovery'].get('attempts', 3)):
                try:
                    att += 1
                    log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
                    masters.update(master_discovery_client.discover())
                    if not masters:
                        time.sleep(self.opts['discovery'].get('pause', 5))
                    else:
                        break
                except Exception as err:
                    log.error('SSDP discovery failure: {0}'.format(err))
                    break

            if masters:
                policy = self.opts.get(u'discovery', {}).get(u'match', DEFAULT_MINION_OPTS[u'discovery'][u'match'])
                if policy not in ['any', 'all']:
                    log.error('SSDP configuration matcher failure: unknown value "{0}". '
                              'Should be "any" or "all"'.format(policy))
                else:
                    mapping = self.opts[u'discovery'].get(u'mapping', {})
                    for addr, mappings in masters.items():
                        for proto_data in mappings:
                            cnt = len([key for key, value in mapping.items()
                                       if proto_data.get('mapping', {}).get(key) == value])
                            if policy == 'any' and bool(cnt) or cnt == len(mapping):
                                self.opts['master'] = proto_data['master']
                                return

    def _return_retry_timer(self):
        '''
        Based on the minion configuration, either return a randomized timer or
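The matcher in ``_discover_masters`` counts how many key/value pairs of the locally configured ``mapping`` appear in each advertised master's mapping, then applies the ``any``/``all`` policy. The decision, reduced to a self-contained sketch (names illustrative, not Salt API):

.. code-block:: python

    def pick_master(masters, mapping, policy='any'):
        # masters: {addr: [{'master': ..., 'mapping': {...}}, ...]}
        for addr, mappings in masters.items():
            for proto_data in mappings:
                cnt = len([k for k, v in mapping.items()
                           if proto_data.get('mapping', {}).get(k) == v])
                # 'any': at least one configured pair matches;
                # 'all': every configured pair matches.
                if (policy == 'any' and cnt) or cnt == len(mapping):
                    return proto_data['master']
        return None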
@ -962,6 +1005,10 @@ class MinionManager(MinionBase):
        failed = False
        while True:
            try:
                if minion.opts.get('beacons_before_connect', False):
                    minion.setup_beacons(before_connect=True)
                if minion.opts.get('scheduler_before_connect', False):
                    minion.setup_scheduler(before_connect=True)
                yield minion.connect_master(failed=failed)
                minion.tune_in(start=False)
                break
@ -1042,6 +1089,7 @@ class Minion(MinionBase):
        # True means the Minion is fully functional and ready to handle events.
        self.ready = False
        self.jid_queue = jid_queue or []
        self.periodic_callbacks = {}

        if io_loop is None:
            if HAS_ZMQ:
@ -1073,6 +1121,19 @@ class Minion(MinionBase):
        # post_master_init
        if not salt.utils.platform.is_proxy():
            self.opts['grains'] = salt.loader.grains(opts)
        else:
            if self.opts.get('beacons_before_connect', False):
                log.warning(
                    '\'beacons_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['beacons_before_connect'] = False
            if self.opts.get('scheduler_before_connect', False):
                log.warning(
                    '\'scheduler_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['scheduler_before_connect'] = False

        log.info('Creating minion process manager')

@ -1178,19 +1239,22 @@ class Minion(MinionBase):
            pillarenv=self.opts.get('pillarenv')
        ).compile_pillar()

        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
        uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
        if not self.ready:
            self._setup_core()
        elif self.connected and self.opts['pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, 'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[master_event(type='alive')])
        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')])

        # add default scheduling jobs to the minions scheduler
        if self.opts['mine_enabled'] and 'mine.update' in self.functions:
@ -1244,9 +1308,6 @@ class Minion(MinionBase):
            self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
            self.schedule.delete_job(master_event(type='failback'), persist=True)

        self.grains_cache = self.opts['grains']
        self.ready = True

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
@ -1577,7 +1638,7 @@ class Minion(MinionBase):
                if not iret:
                    iret = []
                iret.append(single)
                tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
                tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                event_data = {'return': single}
                minion_instance._fire_master(event_data, tag)
                ind += 1
@ -1818,6 +1879,7 @@ class Minion(MinionBase):
                # The file is gone already
                pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
@ -2410,6 +2472,119 @@ class Minion(MinionBase):
            except (ValueError, NameError):
                pass

    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.serial = salt.payload.Serial(self.opts)
            self.mod_opts = self._prep_mod_opts()
            self.matcher = Matcher(self.opts, self.functions)
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
            self.grains_cache = self.opts['grains']
            self.ready = True

    def setup_beacons(self, before_connect=False):
        '''
        Set up the beacons.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'beacons' not in self.periodic_callbacks:
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)

            def handle_beacons():
                # Process Beacons
                beacons = None
                try:
                    beacons = self.process_beacons(self.functions)
                except Exception:
                    log.critical('The beacon errored: ', exc_info=True)
                if beacons and self.connected:
                    self._fire_master(events=beacons)

            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_beacons()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    def setup_scheduler(self, before_connect=False):
        '''
        Set up the scheduler.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'schedule' not in self.periodic_callbacks:
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            if not hasattr(self, 'schedule'):
                self.schedule = salt.utils.schedule.Schedule(
                    self.opts,
                    self.functions,
                    self.returners,
                    cleanup=[master_event(type='alive')])

            try:
                if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
                    if self.opts['grains_refresh_every'] > 1:
                        log.debug(
                            'Enabling the grains refresher. Will run every {0} minutes.'.format(
                                self.opts['grains_refresh_every'])
                        )
                    else:  # Clean up minute vs. minutes in log message
                        log.debug(
                            'Enabling the grains refresher. Will run every {0} minute.'.format(
                                self.opts['grains_refresh_every'])
                        )
                    self._refresh_grains_watcher(
                        abs(self.opts['grains_refresh_every'])
                    )
            except Exception as exc:
                log.error(
                    'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                        exc)
                )

            # TODO: actually listen to the return and change period
            def handle_schedule():
                self.process_schedule(self, loop_interval)
            new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)

            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_schedule()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    # Main Minion Tune In
    def tune_in(self, start=True):
        '''
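Both setup methods above share one idempotency pattern: collect only callbacks that are not yet registered, start just those, then merge them in. Reduced to a skeleton (no Salt or tornado calls, just the shape):

.. code-block:: python

    def register_once(periodic_callbacks, name, make_callback):
        # Safe to call repeatedly: an already-registered callback is kept
        # and never started twice.
        if name in periodic_callbacks:
            return
        cb = make_callback()
        cb.start()
        periodic_callbacks[name] = cb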
@ -2421,6 +2596,10 @@ class Minion(MinionBase):
        log.debug('Minion \'%s\' trying to tune in', self.opts['id'])

        if start:
            if self.opts.get('beacons_before_connect', False):
                self.setup_beacons(before_connect=True)
            if self.opts.get('scheduler_before_connect', False):
                self.setup_scheduler(before_connect=True)
            self.sync_connect_master()
        if self.connected:
            self._fire_master_minion_start()
@ -2436,26 +2615,9 @@ class Minion(MinionBase):
        # On first startup execute a state run if configured to do so
        self._state_run()

        loop_interval = self.opts['loop_interval']
        self.setup_beacons()
        self.setup_scheduler()

        try:
            if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
                log.debug(
                    'Enabling the grains refresher. Will run every %s '
                    'minute%s.',
                    self.opts['grains_refresh_every'],
                    's' if self.opts['grains_refresh_every'] > 1 else ''
                )
                self._refresh_grains_watcher(
                    abs(self.opts['grains_refresh_every'])
                )
        except Exception as exc:
            log.error(
                'Exception occurred in attempt to initialize grain refresh '
                'routine during minion tune-in: %s', exc
            )

        self.periodic_callbacks = {}
        # schedule the stuff that runs every interval
        ping_interval = self.opts.get('ping_interval', 0) * 60
        if ping_interval > 0 and self.connected:
@ -2473,30 +2635,7 @@ class Minion(MinionBase):
                except Exception:
                    log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
            self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)

        self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)

        def handle_beacons():
            # Process Beacons
            beacons = None
            try:
                beacons = self.process_beacons(self.functions)
            except Exception:
                log.critical('The beacon errored: ', exc_info=True)
            if beacons and self.connected:
                self._fire_master(events=beacons, sync=False)

        self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)

        # TODO: actually listen to the return and change period
        def handle_schedule():
            self.process_schedule(self, loop_interval)
        if hasattr(self, 'schedule'):
            self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)

        # start all the other callbacks
        for periodic_cb in six.itervalues(self.periodic_callbacks):
            periodic_cb.start()
        self.periodic_callbacks['ping'].start()

        # add handler to subscriber
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
@ -3144,7 +3283,7 @@ class Matcher(object):
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                if fnmatch.fnmatch(six.text_type(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
@ -3217,7 +3356,7 @@ class Matcher(object):
        if proto not in grains:
            match = False
        elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
            match = str(tgt) in grains[proto]
            match = six.text_type(tgt) in grains[proto]
        else:
            match = salt.utils.network.in_subnet(tgt, grains[proto])

@ -3304,12 +3443,12 @@ class Matcher(object):
                engine_kwargs['delimiter'] = target_info['delimiter']

                results.append(
                    str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
                    six.text_type(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
                )

            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(str(self.glob_match(word)))
                results.append(six.text_type(self.glob_match(word)))

        results = ' '.join(results)
        log.debug('compound_match %s ? "%s" => "%s"', self.opts['id'], tgt, results)
salt/modules/aix_shadow.py (new file)
@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
'''
Manage account locks on AIX systems

.. versionadded:: Oxygen

:depends: none
'''

from __future__ import absolute_import

# Import python libraries
import logging


log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = 'shadow'


def __virtual__():
    '''
    Only load if kernel is AIX
    '''
    if __grains__['kernel'] == 'AIX':
        return __virtualname__
    return (False, 'The aix_shadow execution module failed to load: '
            'only available on AIX systems.')


def login_failures(user):
    '''
    Query for all accounts which have 3 or more login failures.

    CLI Example:

    .. code-block:: bash

        salt <minion_id> shadow.login_failures ALL
    '''
    cmd = 'lsuser -a unsuccessful_login_count {0}'.format(user)
    cmd += " | grep -E 'unsuccessful_login_count=([3-9]|[0-9][0-9]+)'"
    out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True)

    ret = []

    lines = out['stdout'].splitlines()
    for line in lines:
        ret.append(line.split()[0])

    return ret


def locked(user):
    '''
    Query for all accounts which are flagged as locked.

    CLI Example:

    .. code-block:: bash

        salt <minion_id> shadow.locked ALL
    '''
    cmd = 'lsuser -a account_locked {0}'.format(user)
    cmd += ' | grep "account_locked=true"'
    out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True)

    ret = []

    lines = out['stdout'].splitlines()
    for line in lines:
        ret.append(line.split()[0])

    return ret


def unlock(user):
    '''
    Unlock user for locked account

    CLI Example:

    .. code-block:: bash

        salt <minion_id> shadow.unlock user
    '''
    cmd = 'chuser account_locked=false {0} | ' \
          'chsec -f /etc/security/lastlog -a "unsuccessful_login_count=0" -s {0}'.format(user)
    ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=True)

    return ret
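Both query functions in this new module parse the first whitespace-separated field of each ``lsuser`` output line. Assuming output of the shape that ``grep`` filter implies, the parse step reduces to:

.. code-block:: python

    # Hypothetical lsuser output for two flagged accounts
    stdout = 'jsmith account_locked=true\njdoe account_locked=true'
    ret = [line.split()[0] for line in stdout.splitlines()]
    # ret == ['jsmith', 'jdoe']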
@ -797,7 +797,7 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None):
        else:
            rel_root = cwd if cwd is not None else '/'
        if os.path.isdir(src):
            for dir_name, sub_dirs, files in os.walk(src):
            for dir_name, sub_dirs, files in salt.utils.path.os_walk(src):
                if cwd and dir_name.startswith(cwd):
                    arc_dir = os.path.relpath(dir_name, cwd)
                else:
@ -170,6 +170,9 @@ def atrm(*args):
    if not args:
        return {'jobs': {'removed': [], 'tag': None}}

    # Convert all to strings
    args = [str(arg) for arg in args]

    if args[0] == 'all':
        if len(args) > 1:
            opts = list(list(map(str, [j['job'] for j in atq(args[1])['jobs']])))
@ -179,7 +182,7 @@ def atrm(*args):
            ret = {'jobs': {'removed': opts, 'tag': None}}
    else:
        opts = list(list(map(str, [i['job'] for i in atq()['jobs']
            if i['job'] in args])))
            if str(i['job']) in args])))
        ret = {'jobs': {'removed': opts, 'tag': None}}

    # Shim to produce output similar to what __virtual__() should do
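The point of the two changes above is that job IDs may arrive as integers or strings; normalizing both sides to ``str`` makes the membership test reliable:

.. code-block:: python

    # With the normalization, numeric and string job IDs compare equal:
    args = [str(arg) for arg in (22, '23')]        # ['22', '23']
    jobs = [{'job': 22}, {'job': 24}]
    removed = [str(i['job']) for i in jobs if str(i['job']) in args]
    # removed == ['22']; without str(i['job']), 22 in ['22', '23'] is False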
@ -71,7 +71,7 @@ def uuid(dev=None):
    try:
        if dev is None:
            # take the only directory in /sys/fs/bcache and return its basename
            return list(os.walk('/sys/fs/bcache/'))[0][1][0]
            return list(salt.utils.path.os_walk('/sys/fs/bcache/'))[0][1][0]
        else:
            # basename of the /sys/block/{dev}/bcache/cache symlink target
            return os.path.basename(_bcsys(dev, 'cache'))
@ -425,12 +425,12 @@ def status(stats=False, config=False, internals=False, superblock=False, alldevs
    :param superblock: include superblock
    '''
    bdevs = []
    for _, links, _ in os.walk('/sys/block/'):
    for _, links, _ in salt.utils.path.os_walk('/sys/block/'):
        for block in links:
            if 'bcache' in block:
                continue

            for spath, sdirs, _ in os.walk('/sys/block/{0}'.format(block), followlinks=False):
            for spath, sdirs, _ in salt.utils.path.os_walk('/sys/block/{0}'.format(block), followlinks=False):
                if 'bcache' in sdirs:
                    bdevs.append(os.path.basename(spath))
    statii = {}
@ -66,6 +66,7 @@ try:
    #pylint: disable=unused-import
    import boto
    import boto.route53
    import boto.route53.healthcheck
    from boto.route53.exception import DNSServerError
    #pylint: enable=unused-import
    # create_zone params were changed in boto 2.35+
@ -340,6 +341,108 @@ def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None,
    return True


def create_healthcheck(ip_addr=None, fqdn=None, region=None, key=None, keyid=None, profile=None,
                       port=53, hc_type='TCP', resource_path='', string_match=None, request_interval=30,
                       failure_threshold=3, retry_on_errors=True, error_retries=5):
    '''
    Create a Route53 healthcheck

    .. versionadded:: Oxygen

    ip_addr
        IP address to check. ip_addr or fqdn is required.

    fqdn
        Domain name of the endpoint to check. ip_addr or fqdn is required.

    port
        Port to check

    hc_type
        Healthcheck type. HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP

    resource_path
        Path to check

    string_match
        If hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the
        response body from the specified resource

    request_interval
        The number of seconds between the time that Amazon Route 53 gets a response from
        your endpoint and the time that it sends the next health-check request.

    failure_threshold
        The number of consecutive health checks that an endpoint must pass or fail for
        Amazon Route 53 to change the current status of the endpoint from unhealthy to
        healthy or vice versa.

    region
        Region endpoint to connect to

    key
        AWS key

    keyid
        AWS keyid

    profile
        AWS pillar profile

    CLI Example::

        salt myminion boto_route53.create_healthcheck 192.168.0.1
        salt myminion boto_route53.create_healthcheck 192.168.0.1 port=443 hc_type=HTTPS \
            resource_path=/ fqdn=blog.saltstack.furniture
    '''
    if fqdn is None and ip_addr is None:
        msg = 'One of the following must be specified: fqdn or ip_addr'
        log.error(msg)
        return {'error': msg}
    hc_ = boto.route53.healthcheck.HealthCheck(ip_addr,
                                               port,
                                               hc_type,
                                               resource_path,
                                               fqdn=fqdn,
                                               string_match=string_match,
                                               request_interval=request_interval,
                                               failure_threshold=failure_threshold)

    if region is None:
        region = 'universal'

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    while error_retries > 0:
        try:
            return {'result': conn.create_health_check(hc_)}
        except DNSServerError as exc:
            log.debug(exc)
            if retry_on_errors:
                if 'Throttling' == exc.code:
                    log.debug('Throttled by AWS API.')
                elif 'PriorRequestNotComplete' == exc.code:
                    log.debug('The request was rejected by AWS API. '
                              'Route 53 was still processing a prior request.')
                time.sleep(3)
                error_retries -= 1
                continue
            return {'error': __utils__['boto.get_error'](exc)}
    return False


def delete_zone(zone, region=None, key=None, keyid=None, profile=None):
    '''
    Delete a Route53 hosted zone.
@ -651,44 +754,71 @@ def delete_record(name, zone, record_type, identifier=None, all_records=False,
            raise e


def _wait_for_sync(status, conn, wait_for_sync):
    if not wait_for_sync:
def _try_func(conn, func, **args):
    tries = 30
    while True:
        try:
            return getattr(conn, func)(**args)
        except AttributeError as e:
            # Don't include **args in log messages - security concern.
            log.error('Function `{0}()` not found for AWS connection object '
                      '{1}'.format(func, conn))
            return None
        except DNSServerError as e:
            if tries and e.code == 'Throttling':
                log.debug('Throttled by AWS API. Will retry in 5 seconds')
                time.sleep(5)
                tries -= 1
                continue
            log.error('Failed calling {0}(): {1}'.format(func, str(e)))
            return None


def _wait_for_sync(status, conn, wait=True):
    ### Wait should be a bool or an integer
    if wait is True:
        wait = 600
    if not wait:
        return True
    retry = 10
    i = 0
    while i < retry:
        log.info('Getting route53 status (attempt {0})'.format(i + 1))
    orig_wait = wait
    log.info('Waiting up to {0} seconds for Route53 changes to synchronize'.format(orig_wait))
    while wait > 0:
        change = conn.get_change(status)
        log.debug(change.GetChangeResponse.ChangeInfo.Status)
        if change.GetChangeResponse.ChangeInfo.Status == 'INSYNC':
        current = change.GetChangeResponse.ChangeInfo.Status
        if current == 'INSYNC':
            return True
        i = i + 1
        time.sleep(20)
    log.error('Timed out waiting for Route53 status update.')
        sleep = wait if wait % 60 == wait else 60
        log.info('Sleeping {0} seconds waiting for changes to synch (current status {1})'.format(
            sleep, current))
        time.sleep(sleep)
        wait -= sleep
        continue
    log.error('Route53 changes not synced after {0} seconds.'.format(orig_wait))
    return False
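The sleep-step expression in the rewritten ``_wait_for_sync`` reads obliquely: ``wait % 60 == wait`` is only true when ``wait < 60``, so each poll effectively sleeps ``min(wait, 60)`` seconds. A quick check of that equivalence:

.. code-block:: python

    for wait in (600, 90, 45, 10):
        sleep = wait if wait % 60 == wait else 60
        assert sleep == min(wait, 60)   # 60, 60, 45, 10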
def create_hosted_zone(domain_name, caller_ref=None, comment='',
                       private_zone=False, vpc_id=None, vpc_name=None,
                       vpc_region=None, region=None, key=None, keyid=None,
def create_hosted_zone(domain_name, caller_ref=None, comment='', private_zone=False, vpc_id=None,
                       vpc_name=None, vpc_region=None, region=None, key=None, keyid=None,
                       profile=None):
    '''
    Create a new Route53 Hosted Zone. Returns a Python data structure with
    information about the newly created Hosted Zone.
    Create a new Route53 Hosted Zone. Returns a Python data structure with information about the
    newly created Hosted Zone.

    domain_name
        The name of the domain. This should be a fully-specified domain, and
        should terminate with a period. This is the name you have registered
        with your DNS registrar. It is also the name you will delegate from your
        registrar to the Amazon Route 53 delegation servers returned in response
        The name of the domain. This must be fully-qualified, terminating with a period. This is
        the name you have registered with your domain registrar. It is also the name you will
        delegate from your registrar to the Amazon Route 53 delegation servers returned in response
        to this request.

    caller_ref
        A unique string that identifies the request and that allows
        create_hosted_zone() calls to be retried without the risk of executing
        the operation twice. You want to provide this where possible, since
        additional calls while the first is in PENDING status will be accepted
        and can lead to multiple copies of the zone being created in Route53.
        A unique string that identifies the request and that allows create_hosted_zone() calls to
        be retried without the risk of executing the operation twice. It can take several minutes
        for the change to replicate globally, and change from PENDING to INSYNC status. Thus it's
        best to provide some value for this where possible, since duplicate calls while the first
        is in PENDING status will be accepted and can lead to multiple copies of the zone being
        created. On the other hand, if a zone is created with a given caller_ref, then deleted,
        a second attempt to create a zone with the same caller_ref will fail until that caller_ref
        is flushed from the Route53 system, which can take upwards of 24 hours.

    comment
        Any comments you want to include about the hosted zone.
@ -697,33 +827,30 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='',
    private_zone
        Set True if creating a private hosted zone.

    vpc_id
        When creating a private hosted zone, either the VPC ID or VPC Name to
        associate with is required. Exclusive with vpc_name. Ignored if passed
        for a non-private zone.
        When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required. Exclusive with vpc_name. Ignored when creating a non-private zone.

    vpc_name
        When creating a private hosted zone, either the VPC ID or VPC Name to
        associate with is required. Exclusive with vpc_id. Ignored if passed
        for a non-private zone.
        When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required. Exclusive with vpc_id. Ignored when creating a non-private zone.

    vpc_region
        When creating a private hosted zone, the region of the associated VPC is
        required. If not provided, an effort will be made to determine it from
        vpc_id or vpc_name, if possible. If this fails, you'll need to provide
        an explicit value for this option. Ignored if passed for a non-private
        zone.
        When creating a private hosted zone, the region of the associated VPC is required. If not
        provided, an effort will be made to determine it from vpc_id or vpc_name, where possible.
        If this fails, you'll need to provide an explicit value for this option. Ignored when
        creating a non-private zone.

    region
        Region endpoint to connect to
        Region endpoint to connect to.

    key
        AWS key to bind with
        AWS key to bind with.

    keyid
        AWS keyid to bind with
        AWS keyid to bind with.

    profile
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid
        Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.

    CLI Example::

@ -776,24 +903,15 @@ def create_hosted_zone(domain_name, caller_ref=None, comment='',
        log.info('Options vpc_id, vpc_name, and vpc_region are ignored '
                 'when creating non-private zones.')

    retries = 10
    while retries:
        try:
            # Crazy layers of dereference...
            r = conn.create_hosted_zone(**args)
            r = r.CreateHostedZoneResponse.__dict__ if hasattr(r,
                    'CreateHostedZoneResponse') else {}
            return r.get('parent', {}).get('CreateHostedZoneResponse')
        except DNSServerError as e:
            if retries:
                if 'Throttling' == e.code:
                    log.debug('Throttled by AWS API.')
                elif 'PriorRequestNotComplete' == e.code:
                    log.debug('The request was rejected by AWS API. '
                              'Route 53 was still processing a prior request.')
                time.sleep(3)
                retries -= 1
                continue
            log.error('Failed to create hosted zone {0}: {1}'.format(
                domain_name, e.message))
            return None
    r = _try_func(conn, 'create_hosted_zone', **args)
    if r is None:
        log.error('Failed to create hosted zone {0}'.format(domain_name))
        return None
    r = r.get('CreateHostedZoneResponse', {})
    # Pop it since it'll be irrelevant by the time we return
    status = r.pop('ChangeInfo', {}).get('Id', '').replace('/change/', '')
    synced = _wait_for_sync(status, conn, wait=600)
    if not synced:
        log.error('Hosted zone {0} not synced after 600 seconds.'.format(domain_name))
        return None
    return r
@ -4,6 +4,9 @@ Cassandra Database Module

.. versionadded:: 2015.5.0

This module works with Cassandra v2 and v3 and hence generates
queries based on the internal schema of said version.

:depends: DataStax Python Driver for Apache Cassandra
          https://github.com/datastax/python-driver
          pip install cassandra-driver
@ -81,6 +84,7 @@ Cassandra Database Module
from __future__ import absolute_import
import logging
import json
import re
import ssl

# Import Salt Libs
@ -300,7 +304,7 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non

    .. code-block:: bash

        salt '*' cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'"
        salt 'cassandra-server' cassandra_cql.cql_query "SELECT * FROM users_by_name WHERE first_name = 'jane'"
    '''
    try:
        cluster, session = _connect(contact_points=contact_points, port=port, cql_user=cql_user, cql_pass=cql_pass)
@ -314,6 +318,26 @@ def cql_query(query, contact_points=None, port=None, cql_user=None, cql_pass=Non
    session.row_factory = dict_factory
    ret = []

    # Cassandra changed their internal schema from v2 to v3.
    # If the query contains a dictionary sorted by versions,
    # find the query for the current cluster version.
    # https://issues.apache.org/jira/browse/CASSANDRA-6717
    if isinstance(query, dict):
        cluster_version = version(contact_points=contact_points,
                                  port=port,
                                  cql_user=cql_user,
                                  cql_pass=cql_pass)
        match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', cluster_version)
        major, minor, point = match.groups()
        # try to find the specific version in the query dictionary,
        # then try the major version,
        # otherwise default to the highest version number
        try:
            query = query[cluster_version]
        except KeyError:
            query = query.get(major, max(query))
        log.debug('New query is: {0}'.format(query))

    try:
        results = session.execute(query)
    except BaseException as e:
@ -548,8 +572,10 @@ def list_keyspaces(contact_points=None, port=None, cql_user=None, cql_pass=None)

        salt 'minion1' cassandra_cql.list_keyspaces contact_points=minion1 port=9000
    '''
    query = '''select keyspace_name
               from system.schema_keyspaces;'''
    query = {
        '2': 'select keyspace_name from system.schema_keyspaces;',
        '3': 'select keyspace_name from system_schema.keyspaces;',
    }

    ret = {}

@ -594,9 +620,12 @@ def list_column_families(keyspace=None, contact_points=None, port=None, cql_user
    '''
    where_clause = "where keyspace_name = '{0}'".format(keyspace) if keyspace else ""

    query = '''select columnfamily_name
               from system.schema_columnfamilies
               {0};'''.format(where_clause)
    query = {
        '2': '''select columnfamily_name from system.schema_columnfamilies
                {0};'''.format(where_clause),
        '3': '''select column_name from system_schema.columns
                {0};'''.format(where_clause),
    }

    ret = {}

@ -634,14 +663,13 @@ def keyspace_exists(keyspace, contact_points=None, port=None, cql_user=None, cql
    .. code-block:: bash

        salt 'minion1' cassandra_cql.keyspace_exists keyspace=system
        salt 'minion1' cassandra_cql.list_keyspaces keyspace=system contact_points=minion1
    '''
    # Only project the keyspace_name to make the query efficient.
    # Like an echo
    query = '''select keyspace_name
               from system.schema_keyspaces
               where keyspace_name = '{0}';'''.format(keyspace)
    query = {
        '2': '''select keyspace_name from system.schema_keyspaces
                where keyspace_name = '{0}';'''.format(keyspace),
        '3': '''select keyspace_name from system_schema.keyspaces
                where keyspace_name = '{0}';'''.format(keyspace),
    }

    try:
        ret = cql_query(query, contact_points, port, cql_user, cql_pass)
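The version-selection logic added to ``cql_query`` can be exercised without a cluster. For a reported version of ``'3.11.0'`` the exact-version lookup misses and the code falls back to the major key:

.. code-block:: python

    import re

    query = {
        '2': 'select keyspace_name from system.schema_keyspaces;',
        '3': 'select keyspace_name from system_schema.keyspaces;',
    }
    cluster_version = '3.11.0'   # illustrative value
    major, minor, point = re.match(
        r'^(\d+)\.(\d+)(?:\.(\d+))?', cluster_version).groups()
    try:
        chosen = query[cluster_version]
    except KeyError:
        # no exact match: try the major version, else the highest key
        chosen = query.get(major, max(query))
    # chosen == query['3']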
@ -369,7 +369,6 @@ def install(name,
            force_x86=False,
            package_args=None,
            allow_multiple=False,
            no_progress=False,
            execution_timeout=None):
    '''
    Instructs Chocolatey to install a package.
@ -426,9 +425,6 @@ def install(name,

        .. versionadded:: 2017.7.0

    no_progress
        Do not show download progress percentages. Defaults to False.

    execution_timeout (str):
        Chocolatey execution timeout value you want to pass to the installation process. Default is None.

@ -472,11 +468,13 @@ def install(name,
        cmd.extend(['--packageparameters', package_args])
    if allow_multiple:
        cmd.append('--allow-multiple')
    if no_progress:
        cmd.append(_no_progress(__context__))
    if execution_timeout:
        cmd.extend(['--execution-timeout', execution_timeout])

    # Salt doesn't need to see the progress
    cmd.extend(_no_progress(__context__))
    cmd.extend(_yes(__context__))

    result = __salt__['cmd.run_all'](cmd, python_shell=False)

    if result['retcode'] not in [0, 1641, 3010]:
@ -746,13 +744,12 @@ def upgrade(name,
            install_args=None,
            override_args=False,
            force_x86=False,
            package_args=None,
            no_progress=False):
            package_args=None):
    '''
    .. versionadded:: 2016.3.4

    Instructs Chocolatey to upgrade packages on the system. (update is being
    deprecated)
    deprecated). This command will install the package if not installed.

    Args:

@ -790,9 +787,6 @@ def upgrade(name,
        package_args
            A list of arguments you want to pass to the package

        no_progress
            Do not show download progress percentages. Defaults to False.

    Returns:
        str: Results of the ``chocolatey`` command

@ -807,7 +801,7 @@ def upgrade(name,
    choc_path = _find_chocolatey(__context__, __salt__)
    cmd = [choc_path, 'upgrade', name]
    if version:
        cmd.extend(['-version', version])
        cmd.extend(['--version', version])
    if source:
        cmd.extend(['--source', source])
    if salt.utils.data.is_true(force):
@ -822,9 +816,9 @@ def upgrade(name,
        cmd.append('--forcex86')
    if package_args:
        cmd.extend(['--packageparameters', package_args])
    if no_progress:
        cmd.append(_no_progress(__context__))

    # Salt doesn't need to see the progress
    cmd.extend(_no_progress(__context__))
    cmd.extend(_yes(__context__))

    result = __salt__['cmd.run_all'](cmd, python_shell=False)
@ -836,7 +830,7 @@ def upgrade(name,
    return result['stdout']


def update(name, source=None, pre_versions=False, no_progress=False):
def update(name, source=None, pre_versions=False):
    '''
    Instructs Chocolatey to update packages on the system.

@ -851,9 +845,6 @@ def update(name, source=None, pre_versions=False):
    pre_versions
        Include pre-release packages in comparison. Defaults to False.

    no_progress
        Do not show download progress percentages. Defaults to False.

    CLI Example:

    .. code-block:: bash
@ -872,9 +863,11 @@ def update(name, source=None, pre_versions=False):
        cmd.extend(['--source', source])
    if salt.utils.data.is_true(pre_versions):
        cmd.append('--prerelease')
    if no_progress:
        cmd.append(_no_progress(__context__))

    # Salt doesn't need to see the progress
    cmd.extend(_no_progress(__context__))
    cmd.extend(_yes(__context__))

    result = __salt__['cmd.run_all'](cmd, python_shell=False)

    if result['retcode'] not in [0, 1641, 3010]:
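These three chocolatey hunks stop treating progress suppression as an opt-in ``no_progress`` flag and always apply it. Note the callers also switch from ``cmd.append()`` to ``cmd.extend()``, which implies ``_no_progress()`` returns a list of arguments; the old ``append`` would have pushed the whole list as a single element. A hypothetical stand-in with that shape (the real helper is not shown in this merge):

.. code-block:: python

    def _no_progress(context):
        # Returns a list so callers can use cmd.extend() rather than
        # cmd.append(); the flag name here is illustrative.
        return ['--no-progress']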
@ -5,7 +5,7 @@ A module for shelling out.
Keep in mind that this module is insecure, in that it can give whomever has
access to the master root execution access to all salt minions.
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

# Import python libs
import functools
@ -149,7 +149,7 @@ def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_overr
        # write out path to temp file
        tmp_path_fn = salt.utils.files.mkstemp()
        with salt.utils.files.fopen(tmp_path_fn, 'w+') as fp_:
            fp_.write(contents)
            fp_.write(salt.utils.stringutils.to_str(contents))
        data = salt.utils.templates.TEMPLATE_REGISTRY[template](
            tmp_path_fn,
            to_str=True,
@ -223,7 +223,7 @@ def _check_avail(cmd):
    Check to see if the given command can be run
    '''
    if isinstance(cmd, list):
        cmd = ' '.join([str(x) if not isinstance(x, six.string_types) else x
        cmd = ' '.join([six.text_type(x) if not isinstance(x, six.string_types) else x
                        for x in cmd])
    bret = True
    wret = False
@ -372,7 +372,7 @@ def _run(cmd,
        # requested. The command output is what will be controlled by the
        # 'loglevel' parameter.
        msg = (
            u'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format(
            'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format(
                '\'' if not isinstance(cmd, list) else '',
                _get_stripped(cmd),
                'as user \'{0}\' '.format(runas) if runas else '',
@ -433,7 +433,7 @@ def _run(cmd,
            import itertools
            env_runas = dict(itertools.izip(*[iter(env_encoded.split(b'\0'))]*2))
        elif six.PY3:
            if isinstance(env_encoded, str):
            if isinstance(env_encoded, str):  # future lint: disable=blacklisted-function
                env_encoded = env_encoded.encode(__salt_system_encoding__)
            env_runas = dict(list(zip(*[iter(env_encoded.split(b'\0'))]*2)))

@ -491,7 +491,7 @@ def _run(cmd,
    kwargs = {'cwd': cwd,
              'shell': python_shell,
              'env': run_env,
              'stdin': str(stdin) if stdin is not None else stdin,
              'stdin': six.text_type(stdin) if stdin is not None else stdin,
              'stdout': stdout,
              'stderr': stderr,
              'with_communicate': with_communicate,
@ -500,7 +500,7 @@ def _run(cmd,
              }

    if umask is not None:
        _umask = str(umask).lstrip('0')
        _umask = six.text_type(umask).lstrip('0')

        if _umask == '':
            msg = 'Zero umask is not allowed.'
@ -566,7 +566,7 @@ def _run(cmd,
    try:
        proc.run()
    except TimedProcTimeoutError as exc:
        ret['stdout'] = str(exc)
        ret['stdout'] = six.text_type(exc)
        ret['stderr'] = ''
        ret['retcode'] = None
        ret['pid'] = proc.process.pid
@ -577,7 +577,7 @@ def _run(cmd,
    try:
        out = proc.stdout.decode(__salt_system_encoding__)
    except AttributeError:
        out = u''
        out = ''
    except UnicodeDecodeError:
        log.error('UnicodeDecodeError while decoding output of cmd {0}'.format(cmd))
        out = proc.stdout.decode(__salt_system_encoding__, 'replace')
@ -585,7 +585,7 @@ def _run(cmd,
    try:
        err = proc.stderr.decode(__salt_system_encoding__)
    except AttributeError:
        err = u''
        err = ''
    except UnicodeDecodeError:
        log.error('UnicodeDecodeError while decoding error of cmd {0}'.format(cmd))
        err = proc.stderr.decode(__salt_system_encoding__, 'replace')
@ -998,10 +998,10 @@ def run(cmd,
            log.error(log_callback(msg))
            if raise_err:
                raise CommandExecutionError(
                    log_callback(ret[u'stdout'] if not hide_output else u'')
                    log_callback(ret['stdout'] if not hide_output else '')
                )
        log.log(lvl, u'output: %s', log_callback(ret[u'stdout']))
        return ret[u'stdout'] if not hide_output else u''
        log.log(lvl, 'output: %s', log_callback(ret['stdout']))
        return ret['stdout'] if not hide_output else ''


def shell(cmd,
@ -1837,9 +1837,9 @@ def run_all(cmd,
            )
            log.error(log_callback(msg))
        if ret['stdout']:
            log.log(lvl, u'stdout: {0}'.format(log_callback(ret['stdout'])))
            log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
        if ret['stderr']:
            log.log(lvl, u'stderr: {0}'.format(log_callback(ret['stderr'])))
            log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
        if ret['retcode']:
            log.log(lvl, 'retcode: {0}'.format(ret['retcode']))

@ -2275,7 +2275,7 @@ def script(source,
    if not salt.utils.platform.is_windows():
        os.chmod(path, 320)
        os.chown(path, __salt__['file.user_to_uid'](runas), -1)
    ret = _run(path + ' ' + str(args) if args else path,
    ret = _run(path + ' ' + six.text_type(args) if args else path,
               cwd=cwd,
               stdin=stdin,
               output_loglevel=output_loglevel,
@ -2550,7 +2550,7 @@ def exec_code_all(lang, code, cwd=None, args=None, **kwargs):
    codefile = salt.utils.files.mkstemp()

    with salt.utils.files.fopen(codefile, 'w+t', binary=False) as fp_:
        fp_.write(code)
        fp_.write(salt.utils.stringutils.to_str(code))

    if powershell:
        cmd = [lang, "-File", codefile]
@ -2751,7 +2751,7 @@ def run_chroot(root,
        sh_ = '/bin/bash'

    if isinstance(cmd, (list, tuple)):
        cmd = ' '.join([str(i) for i in cmd])
        cmd = ' '.join([six.text_type(i) for i in cmd])
    cmd = 'chroot {0} {1} -c {2}'.format(root, sh_, _cmd_quote(cmd))

    run_func = __context__.pop('cmd.run_chroot.func', run_all)
@ -2795,7 +2795,7 @@ def run_chroot(root,
    __salt__['mount.umount'](os.path.join(root, 'proc'))
    __salt__['mount.umount'](os.path.join(root, 'dev'))
    if hide_output:
        ret[u'stdout'] = ret[u'stderr'] = u''
        ret['stdout'] = ret['stderr'] = ''
    return ret


@ -2811,7 +2811,8 @@ def _is_valid_shell(shell):
    if os.path.exists(shells):
        try:
            with salt.utils.files.fopen(shells, 'r') as shell_fp:
                lines = shell_fp.read().splitlines()
                lines = [salt.utils.stringutils.to_unicode(x)
                         for x in shell_fp.read().splitlines()]
                for line in lines:
                    if line.startswith('#'):
                        continue
@ -2843,7 +2844,8 @@ def shells():
    if os.path.exists(shells_fn):
        try:
            with salt.utils.files.fopen(shells_fn, 'r') as shell_fp:
|
||||
lines = shell_fp.read().splitlines()
|
||||
lines = [salt.utils.stringutils.to_unicode(x)
|
||||
for x in shell_fp.read().splitlines()]
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
if line.startswith('#'):
|
||||
@ -3618,6 +3620,12 @@ def run_bg(cmd,
|
||||
Note that ``env`` represents the environment variables for the command, and
|
||||
should be formatted as a dict, or a YAML string which resolves to a dict.
|
||||
|
||||
.. note::
|
||||
|
||||
If the init system is systemd and the backgrounded task should run even if the salt-minion process
|
||||
is restarted, prepend ``systemd-run --scope`` to the command. This will reparent the process in its
|
||||
own scope separate from salt-minion, and will not be affected by restarting the minion service.
|
||||
|
||||
:param str cmd: The command to run. ex: 'ls -lart /home'
|
||||
|
||||
:param str cwd: The current working directory to execute the command in.
|
||||
|
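Editor's illustration, not part of the commit: the ``systemd-run --scope`` advice from the new ``run_bg`` note amounts to a simple prefix on the command string. The script name below is hypothetical.

.. code-block:: python

    # Hypothetical job; prepending systemd-run --scope detaches it from the
    # salt-minion service cgroup, so a minion restart will not kill it.
    cmd = 'backup.sh /srv/data'  # hypothetical script
    detached = 'systemd-run --scope {0}'.format(cmd)
    # run_bg would then be handed ``detached`` instead of ``cmd``.
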
@ -17,6 +17,7 @@ import salt.fileclient
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.locales
import salt.utils.path
import salt.utils.templates
import salt.utils.url
import salt.crypt
@ -875,7 +876,7 @@ def push_dir(path, glob=None, upload_path=None):
return push(path, upload_path=upload_path)
else:
filelist = []
for root, _, files in os.walk(path):
for root, _, files in salt.utils.path.os_walk(path):
filelist += [os.path.join(root, tmpfile) for tmpfile in files]
if glob is not None:
filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)]

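The ``os.walk`` call sites throughout this merge are being swapped for ``salt.utils.path.os_walk``. As a rough sketch of why, here is what such a wrapper plausibly does (an assumption based on the call sites, not the actual implementation): yield the same triples while decoding every path to unicode, so Python 2 callers see one string type.

.. code-block:: python

    import os
    import sys

    def os_walk(top, *args, **kwargs):
        # Illustrative only: decode each yielded path component so the
        # caller always receives text, never bytes.
        fs_enc = sys.getfilesystemencoding() or 'utf-8'

        def _decode(value):
            return value.decode(fs_enc) if isinstance(value, bytes) else value

        for root, dirs, files in os.walk(top, *args, **kwargs):
            yield (_decode(root),
                   [_decode(d) for d in dirs],
                   [_decode(f) for f in files])
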
@ -149,8 +149,24 @@ def _render_tab(lst):
cron['cmd']
)
)
for spec in lst['special']:
ret.append('{0} {1}\n'.format(spec['spec'], spec['cmd']))
for cron in lst['special']:
if cron['comment'] is not None or cron['identifier'] is not None:
comment = '#'
if cron['comment']:
comment += ' {0}'.format(
cron['comment'].rstrip().replace('\n', '\n# '))
if cron['identifier']:
comment += ' {0}:{1}'.format(SALT_CRON_IDENTIFIER,
cron['identifier'])

comment += '\n'
ret.append(comment)
ret.append('{0}{1} {2}\n'.format(
cron['commented'] is True and '#DISABLED#' or '',
cron['spec'],
cron['cmd']
)
)
return ret


@ -319,7 +335,15 @@ def list_tab(user):
continue
dat['spec'] = comps[0]
dat['cmd'] = ' '.join(comps[1:])
dat['identifier'] = identifier
dat['comment'] = comment
dat['commented'] = False
if commented_cron_job:
dat['commented'] = True
ret['special'].append(dat)
identifier = None
comment = None
commented_cron_job = False
elif line.startswith('#'):
# It's a comment! Catch it!
comment_line = line.lstrip('# ')
@ -365,11 +389,17 @@ def list_tab(user):
ret['pre'].append(line)
return ret


# For consistency's sake
ls = salt.utils.functools.alias_function(list_tab, 'ls')


def set_special(user, special, cmd):
def set_special(user,
special,
cmd,
commented=False,
comment=None,
identifier=None):
'''
Set up a special command in the crontab.

@ -381,11 +411,60 @@ def set_special(user, special, cmd):
'''
lst = list_tab(user)
for cron in lst['special']:
if special == cron['spec'] and cmd == cron['cmd']:
cid = _cron_id(cron)
if _cron_matched(cron, cmd, identifier):
test_setted_id = (
cron['identifier'] is None
and SALT_CRON_NO_IDENTIFIER
or cron['identifier'])
tests = [(cron['comment'], comment),
(cron['commented'], commented),
(identifier, test_setted_id),
(cron['spec'], special)]
if cid or identifier:
tests.append((cron['cmd'], cmd))
if any([_needs_change(x, y) for x, y in tests]):
rm_special(user, cmd, identifier=cid)

# Use old values when setting the new job if there was no
# change needed for a given parameter
if not _needs_change(cron['spec'], special):
special = cron['spec']
if not _needs_change(cron['commented'], commented):
commented = cron['commented']
if not _needs_change(cron['comment'], comment):
comment = cron['comment']
if not _needs_change(cron['cmd'], cmd):
cmd = cron['cmd']
if (
cid == SALT_CRON_NO_IDENTIFIER
):
if identifier:
cid = identifier
if (
cid == SALT_CRON_NO_IDENTIFIER
and cron['identifier'] is None
):
cid = None
cron['identifier'] = cid
if not cid or (
cid and not _needs_change(cid, identifier)
):
identifier = cid
jret = set_special(user, special, cmd, commented=commented,
comment=comment, identifier=identifier)
if jret == 'new':
return 'updated'
else:
return jret
return 'present'
spec = {'spec': special,
'cmd': cmd}
lst['special'].append(spec)
cron = {'spec': special,
'cmd': cmd,
'identifier': identifier,
'comment': comment,
'commented': commented}
lst['special'].append(cron)

comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
@ -538,7 +617,7 @@ def set_job(user,
return 'new'


def rm_special(user, special, cmd):
def rm_special(user, cmd, special=None, identifier=None):
'''
Remove a special cron job for a specified user.

@ -546,22 +625,28 @@ def rm_special(user, special, cmd):

.. code-block:: bash

salt '*' cron.rm_job root @hourly /usr/bin/foo
salt '*' cron.rm_special root /usr/bin/foo
'''
lst = list_tab(user)
ret = 'absent'
rm_ = None
for ind in range(len(lst['special'])):
if lst['special'][ind]['cmd'] == cmd and \
lst['special'][ind]['spec'] == special:
lst['special'].pop(ind)
rm_ = ind
if rm_ is not None:
break
if _cron_matched(lst['special'][ind], cmd, identifier=identifier):
if special is None:
# No special param was specified
rm_ = ind
else:
if lst['special'][ind]['spec'] == special:
rm_ = ind
if rm_ is not None:
lst['special'].pop(rm_)
ret = 'removed'
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit
return comdat['stderr']
comdat = _write_cron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
return comdat['stderr']
return ret


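To make the new special-entry handling concrete, here is an editor's sketch of the crontab text that the reworked ``_render_tab`` emits for one special job. The marker value for ``SALT_CRON_IDENTIFIER`` is assumed, not taken from the diff.

.. code-block:: python

    SALT_CRON_IDENTIFIER = 'SALT_CRON_JOB_IDENTIFIER'  # assumed marker value

    cron = {'spec': '@hourly', 'cmd': '/usr/bin/foo',
            'comment': 'rotate logs', 'identifier': 'foo1', 'commented': False}

    lines = []
    comment = '#'
    if cron['comment']:
        comment += ' {0}'.format(cron['comment'])
    if cron['identifier']:
        comment += ' {0}:{1}'.format(SALT_CRON_IDENTIFIER, cron['identifier'])
    lines.append(comment + '\n')
    lines.append('{0}{1} {2}\n'.format(
        '#DISABLED#' if cron['commented'] else '', cron['spec'], cron['cmd']))

    print(''.join(lines))
    # # rotate logs SALT_CRON_JOB_IDENTIFIER:foo1
    # @hourly /usr/bin/foo

The reworked ``rm_special`` above can then remove such a job by command alone, by ``special`` spec, or by identifier; note that positional callers of the old ``rm_special(user, special, cmd)`` form must swap their arguments.
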
@ -100,7 +100,7 @@ Detailed Function Documentation
-------------------------------
'''

from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

import inspect
import logging
@ -108,6 +108,9 @@ import os
import re

import salt.utils.files
import salt.utils.stringutils

from salt.ext import six

from operator import attrgetter
try:
@ -136,7 +139,7 @@ DEFAULT_DC_FILENAMES = ('docker-compose.yml', 'docker-compose.yaml')

def __virtual__():
if HAS_DOCKERCOMPOSE:
match = re.match(VERSION_RE, str(compose.__version__))
match = re.match(VERSION_RE, six.text_type(compose.__version__))
if match:
version = tuple([int(x) for x in match.group(1).split('.')])
if version >= MIN_DOCKERCOMPOSE:
@ -201,7 +204,7 @@ def __read_docker_compose_file(file_path):
file_name = os.path.basename(file_path)
result = {file_name: ''}
for line in fl:
result[file_name] += line
result[file_name] += salt.utils.stringutils.to_unicode(line)
except EnvironmentError:
return __standardize_result(False,
'Could not read {0}'.format(file_path),
@ -233,7 +236,7 @@ def __write_docker_compose(path, docker_compose):
os.mkdir(dir_name)
try:
with salt.utils.files.fopen(file_path, 'w') as fl:
fl.write(docker_compose)
fl.write(salt.utils.stringutils.to_str(docker_compose))
except EnvironmentError:
return __standardize_result(False,
'Could not write {0}'.format(file_path),

@ -2350,13 +2350,13 @@ def version():
ret = _client_wrapper('version')
version_re = re.compile(VERSION_RE)
if 'Version' in ret:
match = version_re.match(str(ret['Version']))
match = version_re.match(six.text_type(ret['Version']))
if match:
ret['VersionInfo'] = tuple(
[int(x) for x in match.group(1).split('.')]
)
if 'ApiVersion' in ret:
match = version_re.match(str(ret['ApiVersion']))
match = version_re.match(six.text_type(ret['ApiVersion']))
if match:
ret['ApiVersionInfo'] = tuple(
[int(x) for x in match.group(1).split('.')]
@ -3935,9 +3935,9 @@ def build(path=None,
)
else:
if not isinstance(repository, six.string_types):
repository = str(repository)
repository = six.text_type(repository)
if not isinstance(tag, six.string_types):
tag = str(tag)
tag = six.text_type(tag)

# For the build function in the low-level API, the "tag" refers to the full
# tag (e.g. myuser/myimage:mytag). This is different than in other
@ -4075,9 +4075,9 @@ def commit(name,
repository = image

if not isinstance(repository, six.string_types):
repository = str(repository)
repository = six.text_type(repository)
if not isinstance(tag, six.string_types):
tag = str(tag)
tag = six.text_type(tag)

time_started = time.time()
response = _client_wrapper(
@ -4226,9 +4226,9 @@ def import_(source,
repository = image

if not isinstance(repository, six.string_types):
repository = str(repository)
repository = six.text_type(repository)
if not isinstance(tag, six.string_types):
tag = str(tag)
tag = six.text_type(tag)

path = __salt__['container_resource.cache_file'](source)

@ -4575,7 +4575,7 @@ def push(image,
salt myminion docker.push myuser/mycontainer:mytag
'''
if not isinstance(image, six.string_types):
image = str(image)
image = six.text_type(image)

kwargs = {'stream': True,
'client_timeout': client_timeout}
@ -4846,6 +4846,8 @@ def save(name,

try:
with __utils__['files.fopen'](saved_path, 'rb') as uncompressed:
# No need to decode on read and encode on write, since we're
# reading and immediately writing out bytes.
if compression != 'gzip':
# gzip doesn't use a Compressor object, it uses a .open()
# method to open the filehandle. If not using gzip, we need
@ -4940,9 +4942,9 @@ def tag_(name, repository, tag='latest', force=False, image=None):
repository = image

if not isinstance(repository, six.string_types):
repository = str(repository)
repository = six.text_type(repository)
if not isinstance(tag, six.string_types):
tag = str(tag)
tag = six.text_type(tag)

image_id = inspect_image(name)['Id']
response = _client_wrapper('tag',
@ -5952,7 +5954,7 @@ def _script(name,

ret = run_all(
name,
path + ' ' + str(args) if args else path,
path + ' ' + six.text_type(args) if args else path,
exec_driver=exec_driver,
stdin=stdin,
python_shell=python_shell,

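The recurring ``str()`` to ``six.text_type()`` swaps in this file, and throughout the merge, avoid Python 2's ``str()`` raising on non-ASCII input. A minimal sketch of the difference, using the standalone ``six`` package for illustration:

.. code-block:: python

    # -*- coding: utf-8 -*-
    import six

    tag = u'caf\xe9'                  # a unicode value with non-ASCII
    assert six.text_type(tag) == tag  # unicode on py2, str on py3: safe
    # On Python 2, str(tag) would raise UnicodeEncodeError here, which is
    # why display values are coerced with six.text_type() instead.
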
@ -2,12 +2,13 @@
'''
Package support for the dummy proxy used by the test suite
'''
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals

# Import python libs
import logging
import salt.utils.data
import salt.utils.platform
from salt.ext import six


log = logging.getLogger(__name__)
@ -96,9 +97,9 @@ def installed(name,
p = __proxy__['dummy.package_status'](name)
if version is None:
if 'ret' in p:
return str(p['ret'])
return six.text_type(p['ret'])
else:
return True
else:
if p is not None:
return version == str(p)
return version == six.text_type(p)

@ -4,7 +4,7 @@
Provide the service module for the dummy proxy used in integration tests
'''
# Import python libs
from __future__ import absolute_import
from __future__ import absolute_import, print_function, unicode_literals
import logging

# Import Salt libs

@ -8,7 +8,7 @@ group, mode, and data
# TODO: We should add the capability to do u+r type operations here
# some time in the future

from __future__ import absolute_import, print_function
from __future__ import absolute_import, print_function, unicode_literals

# Import python libs
import datetime
@ -123,12 +123,12 @@ def _binary_replace(old, new):
new_isbin = not __utils__['files.is_text'](new)
if any((old_isbin, new_isbin)):
if all((old_isbin, new_isbin)):
return u'Replace binary file'
return 'Replace binary file'
elif old_isbin:
return u'Replace binary file with text file'
return 'Replace binary file with text file'
elif new_isbin:
return u'Replace text file with binary file'
return u''
return 'Replace text file with binary file'
return ''


def _get_bkroot():
@ -384,7 +384,7 @@ def set_mode(path, mode):
'''
path = os.path.expanduser(path)

mode = str(mode).lstrip('0Oo')
mode = six.text_type(mode).lstrip('0Oo')
if not mode:
mode = '0'
if not os.path.exists(path):
@ -1090,8 +1090,8 @@ def sed(path,
return False

# Mandate that before and after are strings
before = str(before)
after = str(after)
before = six.text_type(before)
after = six.text_type(after)
before = _sed_esc(before, escape_all)
after = _sed_esc(after, escape_all)
limit = _sed_esc(limit, escape_all)
@ -1140,8 +1140,8 @@ def sed_contains(path,
if not os.path.exists(path):
return False

before = _sed_esc(str(text), False)
limit = _sed_esc(str(limit), False)
before = _sed_esc(six.text_type(text), False)
limit = _sed_esc(six.text_type(limit), False)
options = '-n -r -e'
if sys.platform == 'darwin':
options = options.replace('-r', '-E')
@ -1225,8 +1225,8 @@ def psed(path,

multi = bool(multi)

before = str(before)
after = str(after)
before = six.text_type(before)
after = six.text_type(after)
before = _sed_esc(before, escape_all)
# The pattern to replace with does not need to be escaped!!!
#after = _sed_esc(after, escape_all)
@ -1238,9 +1238,29 @@ def psed(path,
with salt.utils.files.fopen('{0}{1}'.format(path, backup), 'r') as ifile:
if multi is True:
for line in ifile.readline():
ofile.write(_psed(line, before, after, limit, flags))
ofile.write(
salt.utils.stringutils.to_str(
_psed(
salt.utils.stringutils.to_unicode(line),
before,
after,
limit,
flags
)
)
)
else:
ofile.write(_psed(ifile.read(), before, after, limit, flags))
ofile.write(
salt.utils.stringutils.to_str(
_psed(
salt.utils.stringutils.to_unicode(ifile.read()),
before,
after,
limit,
flags
)
)
)


RE_FLAG_TABLE = {'I': re.I,
@ -1449,8 +1469,7 @@ def comment_line(path,
# Loop through each line of the file and look for a match
for line in r_file:
# Is it in this line
if six.PY3:
line = line.decode(__salt_system_encoding__)
line = salt.utils.stringutils.to_unicode(line)
if re.match(regex, line):
# Load lines into dictionaries, set found to True
orig_file.append(line)
@ -1492,8 +1511,7 @@ def comment_line(path,
buffering=bufsize) as r_file:
# Loop through each line of the file and look for a match
for line in r_file:
if six.PY3:
line = line.decode(__salt_system_encoding__)
line = salt.utils.stringutils.to_unicode(line)
try:
# Is it in this line
if re.match(regex, line):
@ -1505,9 +1523,7 @@ def comment_line(path,
else:
# Write the existing line (no change)
wline = line
if six.PY3:
wline = wline.encode(__salt_system_encoding__)
w_file.write(wline)
w_file.write(salt.utils.stringutils.to_str(wline))
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to write file '{0}'. Contents may "
@ -1561,7 +1577,7 @@ def _get_flags(flags):
if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
_flags_acc = []
for flag in flags:
_flag = getattr(re, str(flag).upper())
_flag = getattr(re, six.text_type(flag).upper())

if not isinstance(_flag, six.integer_types):
raise SaltInvocationError(
@ -1861,7 +1877,7 @@ def line(path, content=None, match=None, mode=None, location=None,
match = content

with salt.utils.files.fopen(path, mode='r') as fp_:
body = fp_.read()
body = salt.utils.stringutils.to_unicode(fp_.read())
body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
after = _regex_to_static(body, after)
before = _regex_to_static(body, before)
@ -2000,14 +2016,19 @@ def line(path, content=None, match=None, mode=None, location=None,
if changed:
if show_changes:
with salt.utils.files.fopen(path, 'r') as fp_:
path_content = _splitlines_preserving_trailing_newline(
fp_.read())
path_content = [salt.utils.stringutils.to_unicode(x)
for x in fp_.read().splitlines(True)]
changes_diff = ''.join(difflib.unified_diff(
path_content, _splitlines_preserving_trailing_newline(body)))
path_content,
[salt.utils.stringutils.to_unicode(x)
for x in body.splitlines(True)]
))
if __opts__['test'] is False:
fh_ = None
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
# Make sure we match the file mode from salt.utils.files.fopen
mode = 'wb' if six.PY2 and salt.utils.platform.is_windows() else 'w'
fh_ = salt.utils.atomicfile.atomic_open(path, mode)
fh_.write(body)
finally:
if fh_:
@ -2204,16 +2225,15 @@ def replace(path,

# Avoid TypeErrors by forcing repl to be bytearray related to mmap
# Replacement text may contain an integer: 123 for example
repl = salt.utils.stringutils.to_bytes(str(repl))
repl = salt.utils.stringutils.to_bytes(six.text_type(repl))
if not_found_content:
not_found_content = salt.utils.stringutils.to_bytes(not_found_content)

found = False
temp_file = None
content = salt.utils.stringutils.to_str(not_found_content) if not_found_content and \
(prepend_if_not_found or
append_if_not_found) \
else salt.utils.stringutils.to_str(repl)
content = salt.utils.stringutils.to_unicode(not_found_content) \
if not_found_content and (prepend_if_not_found or append_if_not_found) \
else salt.utils.stringutils.to_unicode(repl)

try:
# First check the whole file, determine whether to make the replacement
@ -2384,8 +2404,8 @@ def replace(path,
check_perms(path, None, pre_user, pre_group, pre_mode)

def get_changes():
orig_file_as_str = [salt.utils.stringutils.to_str(x) for x in orig_file]
new_file_as_str = [salt.utils.stringutils.to_str(x) for x in new_file]
orig_file_as_str = [salt.utils.stringutils.to_unicode(x) for x in orig_file]
new_file_as_str = [salt.utils.stringutils.to_unicode(x) for x in new_file]
return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str))

if show_changes:
@ -2511,7 +2531,7 @@ def blockreplace(path,
bufsize=1, mode='rb')
for line in fi_file:

line = salt.utils.stringutils.to_str(line)
line = salt.utils.stringutils.to_unicode(line)
result = line

if marker_start in line:
@ -2782,7 +2802,7 @@ def contains(path, text):
if not os.path.exists(path):
return False

stripped_text = str(text).strip()
stripped_text = six.text_type(text).strip()
try:
with salt.utils.filebuffer.BufferedReader(path) as breader:
for chunk in breader:
@ -2818,6 +2838,7 @@ def contains_regex(path, regex, lchar=''):
try:
with salt.utils.files.fopen(path, 'r') as target:
for line in target:
line = salt.utils.stringutils.to_unicode(line)
if lchar:
line = line.lstrip(lchar)
if re.search(regex, line):
@ -2918,7 +2939,11 @@ def append(path, *args, **kwargs):
# Append lines in text mode
with salt.utils.files.fopen(path, 'a') as ofile:
for new_line in args:
ofile.write('{0}{1}'.format(new_line, os.linesep))
ofile.write(
salt.utils.stringutils.to_str(
'{0}{1}'.format(new_line, os.linesep)
)
)

return 'Wrote {0} lines to "{1}"'.format(len(args), path)

@ -2966,7 +2991,8 @@ def prepend(path, *args, **kwargs):

try:
with salt.utils.files.fopen(path) as fhr:
contents = fhr.readlines()
contents = [salt.utils.stringutils.to_unicode(line)
for line in fhr.readlines()]
except IOError:
contents = []

@ -2974,9 +3000,9 @@ def prepend(path, *args, **kwargs):
for line in args:
preface.append('{0}\n'.format(line))

with salt.utils.files.fopen(path, "w") as ofile:
with salt.utils.files.fopen(path, 'w') as ofile:
contents = preface + contents
ofile.write(''.join(contents))
ofile.write(salt.utils.stringutils.to_str(''.join(contents)))
return 'Prepended {0} lines to "{1}"'.format(len(args), path)


@ -3024,7 +3050,7 @@ def write(path, *args, **kwargs):
for line in args:
contents.append('{0}\n'.format(line))
with salt.utils.files.fopen(path, "w") as ofile:
ofile.write(''.join(contents))
ofile.write(salt.utils.stringutils.to_str(''.join(contents)))
return 'Wrote {0} lines to "{1}"'.format(len(contents), path)


@ -3054,8 +3080,8 @@ def touch(name, atime=None, mtime=None):
mtime = int(mtime)
try:
if not os.path.exists(name):
with salt.utils.files.fopen(name, 'a') as fhw:
fhw.write('')
with salt.utils.files.fopen(name, 'a'):
pass

if not atime and not mtime:
times = None
@ -3405,7 +3431,7 @@ def read(path, binary=False):
if binary is True:
access_mode += 'b'
with salt.utils.files.fopen(path, access_mode) as file_obj:
return file_obj.read()
return salt.utils.stringutils.to_unicode(file_obj.read())


def readlink(path, canonicalize=False):
@ -3507,7 +3533,11 @@ def stats(path, hash_type=None, follow_symlinks=True):
pstat = os.lstat(path)
except OSError:
# Not a broken symlink, just a nonexistent path
return ret
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('Path not found: {0}'.format(path))
else:
if follow_symlinks:
pstat = os.stat(path)
@ -3522,7 +3552,7 @@ def stats(path, hash_type=None, follow_symlinks=True):
ret['mtime'] = pstat.st_mtime
ret['ctime'] = pstat.st_ctime
ret['size'] = pstat.st_size
ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
ret['mode'] = six.text_type(oct(stat.S_IMODE(pstat.st_mode)))
if hash_type:
ret['sum'] = get_hash(path, hash_type)
ret['type'] = 'file'
@ -3978,8 +4008,15 @@ def get_managed(
parsed_scheme = urlparsed_source.scheme
parsed_path = os.path.join(
urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
unix_local_source = parsed_scheme in ('file', '')

if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
if unix_local_source:
sfn = parsed_path
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg

if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
parsed_path = ':'.join([parsed_scheme, parsed_path])
parsed_scheme = 'file'

@ -3987,9 +4024,10 @@ def get_managed(
source_sum = __salt__['cp.hash_file'](source, saltenv)
if not source_sum:
return '', {}, 'Source file {0} not found'.format(source)
elif not source_hash and parsed_scheme == 'file':
elif not source_hash and unix_local_source:
source_sum = _get_local_file_source_sum(parsed_path)
elif not source_hash and source.startswith(os.sep):
# This should happen on Windows
source_sum = _get_local_file_source_sum(source)
else:
if not skip_verify:
@ -4147,13 +4185,13 @@ def extract_hash(hash_fn,
hash_type = ''
hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
else:
hash_len_expr = str(hash_len)
hash_len_expr = six.text_type(hash_len)

filename_separators = string.whitespace + r'\/'

if source_hash_name:
if not isinstance(source_hash_name, six.string_types):
source_hash_name = str(source_hash_name)
source_hash_name = six.text_type(source_hash_name)
source_hash_name_idx = (len(source_hash_name) + 1) * -1
log.debug(
'file.extract_hash: Extracting %s hash for file matching '
@ -4163,12 +4201,12 @@ def extract_hash(hash_fn,
)
if file_name:
if not isinstance(file_name, six.string_types):
file_name = str(file_name)
file_name = six.text_type(file_name)
file_name_basename = os.path.basename(file_name)
file_name_idx = (len(file_name_basename) + 1) * -1
if source:
if not isinstance(source, six.string_types):
source = str(source)
source = six.text_type(source)
urlparsed_source = _urlparse(source)
source_basename = os.path.basename(
urlparsed_source.path or urlparsed_source.netloc
@ -4192,7 +4230,7 @@ def extract_hash(hash_fn,

with salt.utils.files.fopen(hash_fn, 'r') as fp_:
for line in fp_:
line = line.strip()
line = salt.utils.stringutils.to_unicode(line.strip())
hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
hash_match = re.search(hash_re, line)
matched = None
@ -4345,12 +4383,6 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False)
# Check permissions
perms = {}
cur = stats(name, follow_symlinks=follow_symlinks)
if not cur:
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('{0} does not exist'.format(name))
perms['luser'] = cur['user']
perms['lgroup'] = cur['group']
perms['lmode'] = salt.utils.files.normalize_mode(cur['mode'])
@ -4701,11 +4733,18 @@ def check_file_meta(
lsattr_cmd = salt.utils.path.which('lsattr')
changes = {}
if not source_sum:
source_sum = {}
lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
source_sum = dict()

try:
lstats = stats(name, hash_type=source_sum.get('hash_type', None),
follow_symlinks=False)
except CommandExecutionError:
lstats = {}

if not lstats:
changes['newfile'] = name
return changes

if 'hsum' in source_sum:
if source_sum['hsum'] != lstats['sum']:
if not sfn and source:
@ -4854,7 +4893,7 @@ def get_diff(file1,
source_hash=source_hash)
if cached_path is False:
errors.append(
u'File {0} not found'.format(
'File {0} not found'.format(
salt.utils.stringutils.to_unicode(filename)
)
)
@ -4874,20 +4913,21 @@ def get_diff(file1,
for idx, filename in enumerate(files):
try:
with salt.utils.files.fopen(filename, 'r') as fp_:
args.append(fp_.readlines())
args.append([salt.utils.stringutils.to_unicode(x)
for x in fp_.readlines()])
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read {0}: {1}'.format(
salt.utils.stringutils.to_str(filename),
salt.utils.stringutils.to_unicode(filename),
exc.strerror
)
)

if args[0] != args[1]:
if template and __salt__['config.option']('obfuscate_templates'):
ret = u'<Obfuscated Template>'
ret = '<Obfuscated Template>'
elif not show_changes:
ret = u'<show_changes=False>'
ret = '<show_changes=False>'
else:
bdiff = _binary_replace(*files)
if bdiff:
@ -4895,14 +4935,14 @@ def get_diff(file1,
else:
if show_filenames:
args.extend(
[salt.utils.stringutils.to_str(x) for x in files]
[salt.utils.stringutils.to_unicode(x) for x in files]
)
ret = salt.utils.locales.sdecode(
''.join(difflib.unified_diff(*args))  # pylint: disable=no-value-for-parameter
)
return ret

return u''
return ''


def manage_file(name,
@ -5004,11 +5044,11 @@ def manage_file(name,
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion

encoding : None
If None, str() will be applied to contents.
If not None, specified encoding will be used.
See https://docs.python.org/3/library/codecs.html#standard-encodings
for the list of available encodings.
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.

.. versionadded:: 2017.7.0

@ -5040,21 +5080,22 @@ def manage_file(name,
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()

if source and not sfn:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if source:
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}

if keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \
or source.startswith('/'):
if _urlparse(source).scheme in ('salt', 'file', ''):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
@ -5084,7 +5125,7 @@ def manage_file(name,
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify \
and _urlparse(source).scheme not in ('salt', ''):
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
@ -5219,12 +5260,12 @@ def manage_file(name,
ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks)

if ret['changes']:
ret['comment'] = u'File {0} updated'.format(
ret['comment'] = 'File {0} updated'.format(
salt.utils.locales.sdecode(name)
)

elif not ret['changes'] and ret['result']:
ret['comment'] = u'File {0} is in the correct state'.format(
ret['comment'] = 'File {0} is in the correct state'.format(
salt.utils.locales.sdecode(name)
)
if sfn:
@ -5246,10 +5287,10 @@ def manage_file(name,
# dir_mode was not specified. Otherwise, any
# directories created with makedirs_() below can't be
# listed via a shell.
mode_list = [x for x in str(mode)][-3:]
mode_list = [x for x in six.text_type(mode)][-3:]
for idx in range(len(mode_list)):
if mode_list[idx] != '0':
mode_list[idx] = str(int(mode_list[idx]) | 1)
mode_list[idx] = six.text_type(int(mode_list[idx]) | 1)
dir_mode = ''.join(mode_list)

if salt.utils.platform.is_windows():
@ -5268,8 +5309,6 @@ def manage_file(name,
makedirs_(name, user=user, group=group, mode=dir_mode)

if source:
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
@ -5293,6 +5332,8 @@ def manage_file(name,
)
ret['result'] = False
return ret
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
@ -5609,7 +5650,7 @@ def mknod_chrdev(name,
ret['result'] = None
else:
if os.mknod(name,
int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
int(six.text_type(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
os.makedev(major, minor)) is None:
ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
ret['result'] = True
@ -5684,7 +5725,7 @@ def mknod_blkdev(name,
ret['result'] = None
else:
if os.mknod(name,
int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
int(six.text_type(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
os.makedev(major, minor)) is None:
ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
ret['result'] = True
@ -5755,7 +5796,7 @@ def mknod_fifo(name,
ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
ret['result'] = None
else:
if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
if os.mkfifo(name, int(six.text_type(mode).lstrip('0Oo'), 8)) is None:
ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
ret['result'] = True
except OSError as exc:
@ -5970,7 +6011,7 @@ def restore_backup(path, backup_id):
ret = {'result': False,
'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
try:
if len(str(backup_id)) == len(str(int(backup_id))):
if len(six.text_type(backup_id)) == len(six.text_type(int(backup_id))):
backup = list_backups(path)[int(backup_id)]
else:
return ret
@ -6030,7 +6071,7 @@ def delete_backup(path, backup_id):
ret = {'result': False,
'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
try:
if len(str(backup_id)) == len(str(int(backup_id))):
if len(six.text_type(backup_id)) == len(six.text_type(int(backup_id))):
backup = list_backups(path)[int(backup_id)]
else:
return ret
@ -6101,7 +6142,7 @@ def grep(path,
try:
split = salt.utils.args.shlex_split(opt)
except AttributeError:
split = salt.utils.args.shlex_split(str(opt))
split = salt.utils.args.shlex_split(six.text_type(opt))
if len(split) > 1:
raise SaltInvocationError(
'Passing multiple command line arguments in a single string '
@ -6365,7 +6406,7 @@ def diskusage(path):
ret = stat_structure.st_size
return ret

for dirpath, dirnames, filenames in os.walk(path):
for dirpath, dirnames, filenames in salt.utils.path.os_walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)

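A worked example (editor's sketch) of the hash-matching regex built in ``extract_hash`` above, with an assumed length expression of ``64`` for SHA256. The look-around guards keep hex runs embedded in longer tokens from matching.

.. code-block:: python

    import re

    hash_len_expr = '64'  # assumed: SHA256 digests are 64 hex characters
    hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'

    digest = 'a' * 64
    assert re.search(hash_re, digest + '  myfile.tar.gz').group(1) == digest
    # An 80-character hex run yields no 64-character match at all:
    assert re.search(hash_re, 'b' * 80) is None
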
@ -25,6 +25,7 @@ import logging
# Import salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.path
from salt.ext.six import string_types
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.ext import six
@ -470,7 +471,7 @@ def list_all():
'''
if 'ports.list_all' not in __context__:
__context__['ports.list_all'] = []
for path, dirs, files in os.walk('/usr/ports'):
for path, dirs, files in salt.utils.path.os_walk('/usr/ports'):
stripped = path[len('/usr/ports'):]
if stripped.count('/') != 2 or stripped.endswith('/CVS'):
continue

@ -1,6 +1,13 @@
# -*- coding: utf-8 -*-
'''
Return/control aspects of the grains data

Grains set or altered with this module are stored in the 'grains'
file on the minions. By default, this file is located at: ``/etc/salt/grains``

.. Note::

This does **NOT** override any grains set in the minion config file.
'''

# Import python libs
@ -20,6 +27,7 @@ from salt.ext import six
import salt.utils.compat
import salt.utils.data
import salt.utils.files
import salt.utils.platform
import salt.utils.yamldumper
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import SaltException
@ -223,20 +231,44 @@ def setvals(grains, destructive=False):
raise SaltException('setvals grains must be a dictionary.')
grains = {}
if os.path.isfile(__opts__['conf_file']):
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)
elif os.path.isdir(__opts__['conf_file']):
gfn = os.path.join(
__opts__['conf_file'],
'grains'
)
if salt.utils.platform.is_proxy():
gfn = os.path.join(
__opts__['conf_file'],
'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
__opts__['conf_file'],
'grains'
)
else:
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)
if salt.utils.platform.is_proxy():
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'proxy.d',
__opts__['id'],
'grains'
)
else:
gfn = os.path.join(
os.path.dirname(__opts__['conf_file']),
'grains'
)

if os.path.isfile(gfn):
with salt.utils.files.fopen(gfn, 'rb') as fp_:

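An editor's illustration of the path selection the reworked ``setvals`` performs, with assumed option values: proxy minions get a per-ID grains file while regular minions keep the flat one.

.. code-block:: python

    import os

    opts = {'conf_file': '/etc/salt/proxy', 'id': 'router1'}  # assumed values
    is_proxy = True  # stand-in for salt.utils.platform.is_proxy()

    base = os.path.dirname(opts['conf_file'])
    if is_proxy:
        gfn = os.path.join(base, 'proxy.d', opts['id'], 'grains')
    else:
        gfn = os.path.join(base, 'grains')

    print(gfn)  # /etc/salt/proxy.d/router1/grains
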
@ -11,6 +11,7 @@ import logging

# Import salt libs
import salt.utils.files
import salt.utils.path

log = logging.getLogger(__name__)

@ -133,7 +134,7 @@ def available():
# Strip .ko from the basename
ret.append(os.path.basename(line)[:-4])

for root, dirs, files in os.walk(mod_dir):
for root, dirs, files in salt.utils.path.os_walk(mod_dir):
for fn_ in files:
if '.ko' in fn_:
ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))
@ -141,7 +142,7 @@ def available():
if 'Arch' in __grains__['os_family']:
# Sadly this path is relative to kernel major version but ignores minor version
mod_dir_arch = '/lib/modules/extramodules-' + os.uname()[2][0:3] + '-ARCH'
for root, dirs, files in os.walk(mod_dir_arch):
for root, dirs, files in salt.utils.path.os_walk(mod_dir_arch):
for fn_ in files:
if '.ko' in fn_:
ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))

@ -21,6 +21,7 @@ import re

# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.decorators as decorators
@ -78,7 +79,7 @@ def _available_services():
'''
available_services = dict()
for launch_dir in _launchd_paths():
for root, dirs, files in os.walk(launch_dir):
for root, dirs, files in salt.utils.path.os_walk(launch_dir):
for filename in files:
file_path = os.path.join(root, filename)
# Follow symbolic links of files in _launchd_paths

@ -144,17 +144,17 @@ def _parse_acl(acl, user, group):
# Set the permissions fields
octal = 0
vals['permissions'] = {}
if 'r' in comps[2]:
if 'r' in comps[-1]:
octal += 4
vals['permissions']['read'] = True
else:
vals['permissions']['read'] = False
if 'w' in comps[2]:
if 'w' in comps[-1]:
octal += 2
vals['permissions']['write'] = True
else:
vals['permissions']['write'] = False
if 'x' in comps[2]:
if 'x' in comps[-1]:
octal += 1
vals['permissions']['execute'] = True
else:

@ -72,7 +72,7 @@ def _available_services():
'''
available_services = dict()
for launch_dir in _launchd_paths():
for root, dirs, files in os.walk(launch_dir):
for root, dirs, files in salt.utils.path.os_walk(launch_dir):
for file_name in files:

# Must be a plist file

@ -12,6 +12,7 @@ import os
# import salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.path
import salt.utils.mac_utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError, SaltInvocationError
@ -330,7 +331,7 @@ def list_downloads():
salt '*' softwareupdate.list_downloads
'''
outfiles = []
for root, subFolder, files in os.walk('/Library/Updates'):
for root, subFolder, files in salt.utils.path.os_walk('/Library/Updates'):
for f in files:
outfiles.append(os.path.join(root, f))

@ -19,12 +19,13 @@ import logging
import time

# Import 3rd-party libs
from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves import range, map  # pylint: disable=import-error,redefined-builtin
from salt.ext.six import string_types

# Import salt libs
import salt.utils.args
import salt.utils.decorators.path
import salt.utils.files
import salt.utils.stringutils
import salt.utils.user
from salt.utils.locales import sdecode as _sdecode
@ -522,16 +523,72 @@ def get_auto_login():
return False if ret['retcode'] else ret['stdout']


def enable_auto_login(name):
def _kcpassword(password):
'''
Internal function for obfuscating the password used for AutoLogin
This is later written as the contents of the ``/etc/kcpassword`` file

.. versionadded:: 2017.7.3

Adapted from:
https://github.com/timsutton/osx-vm-templates/blob/master/scripts/support/set_kcpassword.py

Args:

password(str):
The password to obfuscate

Returns:
str: The obfuscated password
'''
# The magic 11 bytes - these are just repeated
# 0x7D 0x89 0x52 0x23 0xD2 0xBC 0xDD 0xEA 0xA3 0xB9 0x1F
key = [125, 137, 82, 35, 210, 188, 221, 234, 163, 185, 31]
key_len = len(key)

# Convert each character to a byte
password = list(map(ord, password))

# pad password length out to an even multiple of key length
remainder = len(password) % key_len
if remainder > 0:
password = password + [0] * (key_len - remainder)

# Break the password into chunks the size of len(key) (11)
for chunk_index in range(0, len(password), len(key)):
# Reset the key_index to 0 for each iteration
key_index = 0

# Do an XOR on each character of that chunk of the password with the
# corresponding item in the key
# The length of the password, or the length of the key, whichever is
# smaller
for password_index in range(chunk_index,
min(chunk_index + len(key), len(password))):
password[password_index] = password[password_index] ^ key[key_index]
key_index += 1

# Convert each byte back to a character
password = list(map(chr, password))
return ''.join(password)


def enable_auto_login(name, password):
'''
.. versionadded:: 2016.3.0

Configures the machine to auto login with the specified user

:param str name: The user account use for auto login
Args:

:return: True if successful, False if not
:rtype: bool
name (str): The user account to use for auto login

password (str): The password to use for auto login

.. versionadded:: 2017.7.3

Returns:
bool: ``True`` if successful, otherwise ``False``

CLI Example:

@ -539,6 +596,7 @@ def enable_auto_login(name):

salt '*' user.enable_auto_login stevej
'''
# Make the entry into the defaults file
cmd = ['defaults',
'write',
'/Library/Preferences/com.apple.loginwindow.plist',
@ -546,6 +604,13 @@ def enable_auto_login(name):
name]
__salt__['cmd.run'](cmd)
current = get_auto_login()

# Create/Update the kcpassword file with an obfuscated password
o_password = _kcpassword(password=password)
with salt.utils.files.set_umask(0o077):
with salt.utils.files.fopen('/etc/kcpassword', 'w') as fd:
fd.write(o_password)

return current if isinstance(current, bool) else current.lower() == name.lower()


@ -555,8 +620,8 @@ def disable_auto_login():

Disables auto login on the machine

:return: True if successful, False if not
:rtype: bool
Returns:
bool: ``True`` if successful, otherwise ``False``

CLI Example:

@ -564,6 +629,11 @@ def disable_auto_login():

salt '*' user.disable_auto_login
'''
# Remove the kcpassword file
cmd = 'rm -f /etc/kcpassword'
__salt__['cmd.run'](cmd)

# Remove the entry from the defaults file
cmd = ['defaults',
'delete',
'/Library/Preferences/com.apple.loginwindow.plist',

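To see the XOR obfuscation in ``_kcpassword`` at work, here is an editor's trace for a short password, using the same key bytes hard-coded above; applying the key a second time recovers the original.

.. code-block:: python

    key = [125, 137, 82, 35, 210, 188, 221, 234, 163, 185, 31]

    password = list(map(ord, 'abc'))  # [97, 98, 99]
    # pad to a multiple of len(key) == 11, as _kcpassword does
    password += [0] * (len(key) - len(password) % len(key))
    obfuscated = [c ^ key[i % len(key)] for i, c in enumerate(password)]
    print(obfuscated[:3])  # [28, 235, 49]

    # XOR with the same key bytes round-trips:
    recovered = [c ^ key[i % len(key)] for i, c in enumerate(obfuscated)]
    assert recovered[:3] == [97, 98, 99]
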
@ -29,6 +29,7 @@ log = logging.getLogger(__name__)
import salt.utils.files
import salt.utils.napalm
import salt.utils.templates
import salt.utils.versions

# Import 3rd-party libs
from salt.ext import six
@ -1347,6 +1348,10 @@ def load_template(template_name,

To replace the config, set ``replace`` to ``True``.

.. warning::
The support for native NAPALM templates will be dropped in Salt Fluorine.
Implicitly, the ``template_path`` argument will be removed.

template_name
Identifies path to the template source.
The template can be stored either on the local machine or remotely.
@ -1383,6 +1388,9 @@ def load_template(template_name,
in order to find the template, this argument must be provided:
``template_path: /absolute/path/to/``.

.. note::
This argument will be deprecated beginning with release codename ``Fluorine``.

template_hash: None
Hash of the template file. Format: ``{hash_type: 'md5', 'hsum': <md5sum>}``

@ -1554,7 +1562,11 @@ def load_template(template_name,
'out': None
}
loaded_config = None

if template_path:
salt.utils.versions.warn_until(
'Fluorine',
'Use of `template_path` detected. This argument will be removed in Salt Fluorine.'
)
# prechecks
if template_engine not in salt.utils.templates.TEMPLATE_REGISTRY:
_loaded.update({

@ -4,7 +4,8 @@ Package support for OpenBSD

.. note::

The package repository is configured on each host using ``/etc/pkg.conf``
The package repository is configured on each host using ``/etc/installurl``
from OpenBSD 6.1 onwards. Earlier releases relied on ``/etc/pkg.conf``.

.. versionchanged:: 2016.3.5

@ -11,6 +11,7 @@ import shutil

# Import salt libs
import salt.utils.files
import salt.utils.path

# Import third party libs
from salt.ext import six
@ -170,7 +171,7 @@ def _unify_keywords():
old_path = BASE_PATH.format('keywords')
if os.path.exists(old_path):
if os.path.isdir(old_path):
for triplet in os.walk(old_path):
for triplet in salt.utils.path.os_walk(old_path):
for file_name in triplet[2]:
file_path = '{0}/{1}'.format(triplet[0], file_name)
with salt.utils.files.fopen(file_path) as fh_:
@ -218,7 +219,7 @@ def _package_conf_ordering(conf, clean=True, keep_backup=False):

backup_files = []

for triplet in os.walk(path):
for triplet in salt.utils.path.os_walk(path):
for file_name in triplet[2]:
file_path = '{0}/{1}'.format(triplet[0], file_name)
cp = triplet[0][len(path) + 1:] + '/' + file_name
@ -263,7 +264,7 @@ def _package_conf_ordering(conf, clean=True, keep_backup=False):
pass

if clean:
for triplet in os.walk(path):
for triplet in salt.utils.path.os_walk(path):
if len(triplet[1]) == 0 and len(triplet[2]) == 0 and \
triplet[0] != path:
shutil.rmtree(triplet[0])

@ -247,7 +247,7 @@ def install_ruby(ruby, runas=None):

ret = {}
ret = _rbenv_exec(['install', ruby], env=env, runas=runas, ret=ret)
if ret['retcode'] == 0:
if ret is not False and ret['retcode'] == 0:
rehash(runas=runas)
return ret['stderr']
else:

@ -22,7 +22,7 @@ Values or Entries

Values/Entries are name/data pairs. There can be many values in a key. The
(Default) value corresponds to the Key, the rest are their own value pairs.

:depends:   - winreg Python module
:depends:   - PyWin32
'''
# When the production Windows installer is using Python 3, the Python 2 code can be removed

@ -33,20 +33,20 @@ from __future__ import unicode_literals
import sys
import logging
from salt.ext.six.moves import range  # pylint: disable=W0622,import-error
from salt.ext import six

# Import third party libs
try:
    from salt.ext.six.moves import winreg as _winreg  # pylint: disable=import-error,no-name-in-module
    from win32con import HWND_BROADCAST, WM_SETTINGCHANGE
    from win32api import RegCreateKeyEx, RegSetValueEx, RegFlushKey, \
        RegCloseKey, error as win32apiError, SendMessage
    import win32gui
    import win32api
    import win32con
    import pywintypes
    HAS_WINDOWS_MODULES = True
except ImportError:
    HAS_WINDOWS_MODULES = False

# Import Salt libs
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError

PY2 = sys.version_info[0] == 2

@ -58,7 +58,7 @@ __virtualname__ = 'reg'

def __virtual__():
    '''
    Only works on Windows systems with the _winreg python module
    Only works on Windows systems with PyWin32
    '''
    if not salt.utils.platform.is_windows():
        return (False, 'reg execution module failed to load: '

@ -67,106 +67,76 @@ def __virtual__():
    if not HAS_WINDOWS_MODULES:
        return (False, 'reg execution module failed to load: '
                       'One of the following libraries did not load: '
                       + '_winreg, win32gui, win32con, win32api')
                       + 'win32gui, win32con, win32api')

    return __virtualname__


# winreg in Python 2 is hard coded to use the codec 'mbcs', which uses the
# encoding that the user has assigned. The functions _to_unicode
# and _to_mbcs help with this.
def _to_mbcs(vdata):
    '''
    Converts unicode to the current user's character encoding. Use this for
    values returned by reg functions.
    '''
    return salt.utils.stringutils.to_unicode(vdata, 'mbcs')


def _unicode_to_mbcs(instr):
def _to_unicode(vdata):
    '''
    Converts unicode to to current users character encoding.
    Converts from the current user's character encoding to unicode. Use this
    for parameters being passed to reg functions.
    '''
    if isinstance(instr, six.text_type):
        # unicode to windows utf8
        return instr.encode('mbcs')
    else:
        # Assume its byte str or not a str/unicode
        return instr


def _mbcs_to_unicode(instr):
    '''
    Converts from current users character encoding to unicode.
    When instr has a value of None, the return value of the function
    will also be None.
    '''
    if instr is None or isinstance(instr, six.text_type):
        return instr
    else:
        return six.text_type(instr, 'mbcs')


def _mbcs_to_unicode_wrap(obj, vtype):
    '''
    Wraps _mbcs_to_unicode for use with registry vdata
    '''
    if vtype == 'REG_BINARY':
        # We should be able to leave it alone if the user has passed binary data in yaml with
        # binary !!
        # In python < 3 this should have type str and in python 3+ this should be a byte array
        return obj
    if isinstance(obj, list):
        return [_mbcs_to_unicode(x) for x in obj]
    elif isinstance(obj, six.integer_types):
        return obj
    else:
        return _mbcs_to_unicode(obj)
    return salt.utils.stringutils.to_unicode(vdata, 'utf-8')

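Both new helpers funnel into ``salt.utils.stringutils.to_unicode`` and differ only in the encoding they assume for incoming byte strings: ``mbcs`` for values coming back from the registry, ``utf-8`` for parameters heading in. A portable sketch of the pattern, using utf-8 in both spots so it runs off Windows:

.. code-block:: python

    def to_unicode(vdata, encoding):
        # simplified stand-in for salt.utils.stringutils.to_unicode
        if isinstance(vdata, bytes):
            return vdata.decode(encoding)
        return vdata

    def _to_mbcs(vdata):
        # values returned by reg functions ('mbcs' on a real Windows box)
        return to_unicode(vdata, 'utf-8')

    def _to_unicode(vdata):
        # parameters being passed to reg functions
        return to_unicode(vdata, 'utf-8')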
class Registry(object):  # pylint: disable=R0903
    '''
    Delay '_winreg' usage until this module is used
    Delay usage until this module is used
    '''
    def __init__(self):
        self.hkeys = {
            'HKEY_CURRENT_USER': _winreg.HKEY_CURRENT_USER,
            'HKEY_LOCAL_MACHINE': _winreg.HKEY_LOCAL_MACHINE,
            'HKEY_USERS': _winreg.HKEY_USERS,
            'HKCU': _winreg.HKEY_CURRENT_USER,
            'HKLM': _winreg.HKEY_LOCAL_MACHINE,
            'HKU': _winreg.HKEY_USERS,
            'HKEY_CURRENT_USER': win32con.HKEY_CURRENT_USER,
            'HKEY_LOCAL_MACHINE': win32con.HKEY_LOCAL_MACHINE,
            'HKEY_USERS': win32con.HKEY_USERS,
            'HKCU': win32con.HKEY_CURRENT_USER,
            'HKLM': win32con.HKEY_LOCAL_MACHINE,
            'HKU': win32con.HKEY_USERS,
        }
        self.vtype = {
            'REG_BINARY': _winreg.REG_BINARY,
            'REG_DWORD': _winreg.REG_DWORD,
            'REG_EXPAND_SZ': _winreg.REG_EXPAND_SZ,
            'REG_MULTI_SZ': _winreg.REG_MULTI_SZ,
            'REG_SZ': _winreg.REG_SZ
            'REG_BINARY': win32con.REG_BINARY,
            'REG_DWORD': win32con.REG_DWORD,
            'REG_EXPAND_SZ': win32con.REG_EXPAND_SZ,
            'REG_MULTI_SZ': win32con.REG_MULTI_SZ,
            'REG_SZ': win32con.REG_SZ,
            'REG_QWORD': win32con.REG_QWORD
        }
        self.opttype = {
            'REG_OPTION_NON_VOLATILE': _winreg.REG_OPTION_NON_VOLATILE,
            'REG_OPTION_VOLATILE': _winreg.REG_OPTION_VOLATILE
            'REG_OPTION_NON_VOLATILE': 0,
            'REG_OPTION_VOLATILE': 1
        }
        # Return Unicode due to from __future__ import unicode_literals
        self.vtype_reverse = {
            _winreg.REG_BINARY: 'REG_BINARY',
            _winreg.REG_DWORD: 'REG_DWORD',
            _winreg.REG_EXPAND_SZ: 'REG_EXPAND_SZ',
            _winreg.REG_MULTI_SZ: 'REG_MULTI_SZ',
            _winreg.REG_SZ: 'REG_SZ',
            # REG_QWORD isn't in the winreg library
            11: 'REG_QWORD'
            win32con.REG_BINARY: 'REG_BINARY',
            win32con.REG_DWORD: 'REG_DWORD',
            win32con.REG_EXPAND_SZ: 'REG_EXPAND_SZ',
            win32con.REG_MULTI_SZ: 'REG_MULTI_SZ',
            win32con.REG_SZ: 'REG_SZ',
            win32con.REG_QWORD: 'REG_QWORD'
        }
        self.opttype_reverse = {
            _winreg.REG_OPTION_NON_VOLATILE: 'REG_OPTION_NON_VOLATILE',
            _winreg.REG_OPTION_VOLATILE: 'REG_OPTION_VOLATILE'
            0: 'REG_OPTION_NON_VOLATILE',
            1: 'REG_OPTION_VOLATILE'
        }
        # delete_key_recursive uses this to check the subkey contains enough \
        # as we do not want to remove all or most of the registry
        self.subkey_slash_check = {
            _winreg.HKEY_CURRENT_USER: 0,
            _winreg.HKEY_LOCAL_MACHINE: 1,
            _winreg.HKEY_USERS: 1
            win32con.HKEY_CURRENT_USER: 0,
            win32con.HKEY_LOCAL_MACHINE: 1,
            win32con.HKEY_USERS: 1
        }

        self.registry_32 = {
            True: _winreg.KEY_READ | _winreg.KEY_WOW64_32KEY,
            False: _winreg.KEY_READ,
            True: win32con.KEY_READ | win32con.KEY_WOW64_32KEY,
            False: win32con.KEY_READ,
        }

    def __getattr__(self, k):
@ -189,21 +159,16 @@ def _key_exists(hive, key, use_32bit_registry=False):
    :return: Returns True if found, False if not found
    :rtype: bool
    '''

    if PY2:
        local_hive = _mbcs_to_unicode(hive)
        local_key = _unicode_to_mbcs(key)
    else:
        local_hive = hive
        local_key = key
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)

    registry = Registry()
    hkey = registry.hkeys[local_hive]
    access_mask = registry.registry_32[use_32bit_registry]

    try:
        handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
        _winreg.CloseKey(handle)
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
        win32api.RegCloseKey(handle)
        return True
    except WindowsError:  # pylint: disable=E0602
        return False

@ -222,7 +187,10 @@ def broadcast_change():
        salt '*' reg.broadcast_change
    '''
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx
    return bool(SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, 0))
    _, res = win32gui.SendMessageTimeout(
        win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0,
        win32con.SMTO_ABORTIFHUNG, 5000)
    return not bool(res)
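The switch from ``SendMessage`` to ``SendMessageTimeout`` is what keeps a hung window from blocking the minion forever: ``SMTO_ABORTIFHUNG`` abandons the broadcast after the 5000 ms timeout. The new body, shown standalone (Windows-only, requires PyWin32):

.. code-block:: python

    import win32con
    import win32gui

    def broadcast_change():
        # True when every top-level window processed WM_SETTINGCHANGE in time
        _, res = win32gui.SendMessageTimeout(
            win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0,
            win32con.SMTO_ABORTIFHUNG, 5000)
        return not bool(res)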
def list_keys(hive, key=None, use_32bit_registry=False):

@ -251,12 +219,8 @@ def list_keys(hive, key=None, use_32bit_registry=False):
        salt '*' reg.list_keys HKLM 'SOFTWARE'
    '''

    if PY2:
        local_hive = _mbcs_to_unicode(hive)
        local_key = _unicode_to_mbcs(key)
    else:
        local_hive = hive
        local_key = key
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)

    registry = Registry()
    hkey = registry.hkeys[local_hive]

@ -264,12 +228,12 @@ def list_keys(hive, key=None, use_32bit_registry=False):

    subkeys = []
    try:
        handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)

        for i in range(_winreg.QueryInfoKey(handle)[0]):
            subkey = _winreg.EnumKey(handle, i)
        for i in range(win32api.RegQueryInfoKey(handle)[0]):
            subkey = win32api.RegEnumKey(handle, i)
            if PY2:
                subkeys.append(_mbcs_to_unicode(subkey))
                subkeys.append(_to_unicode(subkey))
            else:
                subkeys.append(subkey)

@ -310,13 +274,8 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):

        salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
    '''

    if PY2:
        local_hive = _mbcs_to_unicode(hive)
        local_key = _unicode_to_mbcs(key)
    else:
        local_hive = hive
        local_key = key
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)

    registry = Registry()
    hkey = registry.hkeys[local_hive]

@ -325,37 +284,21 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
    values = list()

    try:
        handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)

        for i in range(_winreg.QueryInfoKey(handle)[1]):
            vname, vdata, vtype = _winreg.EnumValue(handle, i)
        for i in range(win32api.RegQueryInfoKey(handle)[1]):
            vname, vdata, vtype = win32api.RegEnumValue(handle, i)

            if not vname:
                vname = "(Default)"

            value = {'hive': local_hive,
                     'key': local_key,
                     'vname': vname,
                     'vdata': vdata,
                     'vname': _to_mbcs(vname),
                     'vdata': _to_mbcs(vdata),
                     'vtype': registry.vtype_reverse[vtype],
                     'success': True}
            values.append(value)
        if include_default:
            # Get the default value for the key
            value = {'hive': local_hive,
                     'key': local_key,
                     'vname': '(Default)',
                     'vdata': None,
                     'success': True}
            try:
                # QueryValueEx returns unicode data
                vdata, vtype = _winreg.QueryValueEx(handle, '(Default)')
                if vdata or vdata in [0, '']:
                    value['vtype'] = registry.vtype_reverse[vtype]
                    value['vdata'] = vdata
                else:
                    value['comment'] = 'Empty Value'
            except WindowsError:  # pylint: disable=E0602
                value['vdata'] = ('(value not set)')
                value['vtype'] = 'REG_SZ'
            values.append(value)
    except WindowsError as exc:  # pylint: disable=E0602
        log.debug(exc)
        log.debug(r'Cannot find key: {0}\{1}'.format(hive, key))

@ -401,30 +344,19 @@ def read_value(hive, key, vname=None, use_32bit_registry=False):

        salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
    '''

    # If no name is passed, the default value of the key will be returned
    # The value name is Default

    # Setup the return array
    if PY2:
        ret = {'hive': _mbcs_to_unicode(hive),
               'key': _mbcs_to_unicode(key),
               'vname': _mbcs_to_unicode(vname),
               'vdata': None,
               'success': True}
        local_hive = _mbcs_to_unicode(hive)
        local_key = _unicode_to_mbcs(key)
        local_vname = _unicode_to_mbcs(vname)
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)
    local_vname = _to_unicode(vname)

    else:
        ret = {'hive': hive,
               'key': key,
               'vname': vname,
               'vdata': None,
               'success': True}
        local_hive = hive
        local_key = key
        local_vname = vname
    ret = {'hive': local_hive,
           'key': local_key,
           'vname': local_vname,
           'vdata': None,
           'success': True}

    if not vname:
        ret['vname'] = '(Default)'

@ -434,19 +366,22 @@ def read_value(hive, key, vname=None, use_32bit_registry=False):
    access_mask = registry.registry_32[use_32bit_registry]

    try:
        handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
        try:
            # QueryValueEx returns unicode data
            vdata, vtype = _winreg.QueryValueEx(handle, local_vname)
            # RegQueryValueEx returns and accepts unicode data
            vdata, vtype = win32api.RegQueryValueEx(handle, local_vname)
            if vdata or vdata in [0, '']:
                ret['vtype'] = registry.vtype_reverse[vtype]
                ret['vdata'] = vdata
                if vtype == 7:
                    ret['vdata'] = [_to_mbcs(i) for i in vdata]
                else:
                    ret['vdata'] = _to_mbcs(vdata)
            else:
                ret['comment'] = 'Empty Value'
        except WindowsError:  # pylint: disable=E0602
            ret['vdata'] = ('(value not set)')
            ret['vtype'] = 'REG_SZ'
    except WindowsError as exc:  # pylint: disable=E0602
    except pywintypes.error as exc:  # pylint: disable=E0602
        log.debug(exc)
        log.debug('Cannot find key: {0}\\{1}'.format(local_hive, local_key))
        ret['comment'] = 'Cannot find key: {0}\\{1}'.format(local_hive, local_key)

@ -553,42 +488,47 @@ def set_value(hive,
        salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2' \\
        vtype=REG_LIST vdata='[a,b,c]'
    '''

    if PY2:
        try:
            local_hive = _mbcs_to_unicode(hive)
            local_key = _mbcs_to_unicode(key)
            local_vname = _mbcs_to_unicode(vname)
            local_vtype = _mbcs_to_unicode(vtype)
            local_vdata = _mbcs_to_unicode_wrap(vdata, local_vtype)
        except TypeError as exc:  # pylint: disable=E0602
            log.error(exc, exc_info=True)
            return False
    else:
        local_hive = hive
        local_key = key
        local_vname = vname
        local_vdata = vdata
        local_vtype = vtype
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)
    local_vname = _to_unicode(vname)
    local_vtype = _to_unicode(vtype)

    registry = Registry()
    hkey = registry.hkeys[local_hive]
    vtype_value = registry.vtype[local_vtype]
    access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS
    access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS

    # Check data type and cast to expected type
    # int will automatically become long on 64bit numbers
    # https://www.python.org/dev/peps/pep-0237/

    # String Types to Unicode
    if vtype_value in [1, 2]:
        local_vdata = _to_unicode(vdata)
    # Don't touch binary...
    elif vtype_value == 3:
        local_vdata = vdata
    # Make sure REG_MULTI_SZ is a list of strings
    elif vtype_value == 7:
        local_vdata = [_to_unicode(i) for i in vdata]
    # Everything else is int
    else:
        local_vdata = int(vdata)

    if volatile:
        create_options = registry.opttype['REG_OPTION_VOLATILE']
    else:
        create_options = registry.opttype['REG_OPTION_NON_VOLATILE']

    try:
        handle, _ = RegCreateKeyEx(hkey, local_key, access_mask,
        handle, _ = win32api.RegCreateKeyEx(hkey, local_key, access_mask,
                                            Options=create_options)
        RegSetValueEx(handle, local_vname, 0, vtype_value, local_vdata)
        RegFlushKey(handle)
        RegCloseKey(handle)
        win32api.RegSetValueEx(handle, local_vname, 0, vtype_value, local_vdata)
        win32api.RegFlushKey(handle)
        win32api.RegCloseKey(handle)
        broadcast_change()
        return True
    except (win32apiError, SystemError, ValueError, TypeError) as exc:  # pylint: disable=E0602
    except (win32api.error, SystemError, ValueError, TypeError) as exc:  # pylint: disable=E0602
        log.error(exc, exc_info=True)
        return False

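The numeric comparisons in the new casting block are the raw registry type codes: ``1`` is ``REG_SZ``, ``2`` is ``REG_EXPAND_SZ``, ``3`` is ``REG_BINARY`` and ``7`` is ``REG_MULTI_SZ``. A platform-independent sketch of the same dispatch:

.. code-block:: python

    REG_SZ, REG_EXPAND_SZ, REG_BINARY, REG_MULTI_SZ = 1, 2, 3, 7

    def cast_vdata(vdata, vtype_value):
        if vtype_value in (REG_SZ, REG_EXPAND_SZ):
            return str(vdata)               # string types to unicode
        elif vtype_value == REG_BINARY:
            return vdata                    # don't touch binary
        elif vtype_value == REG_MULTI_SZ:
            return [str(i) for i in vdata]  # must be a list of strings
        return int(vdata)                   # everything else is int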
@ -624,18 +564,14 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
        salt '*' reg.delete_key_recursive HKLM SOFTWARE\\salt
    '''

    if PY2:
        local_hive = _mbcs_to_unicode(hive)
        local_key = _unicode_to_mbcs(key)
    else:
        local_hive = hive
        local_key = key
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)

    # Instantiate the registry object
    registry = Registry()
    hkey = registry.hkeys[local_hive]
    key_path = local_key
    access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS
    access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS

    if not _key_exists(local_hive, local_key, use_32bit_registry):
        return False

@ -652,17 +588,17 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
        i = 0
        while True:
            try:
                subkey = _winreg.EnumKey(_key, i)
                subkey = win32api.RegEnumKey(_key, i)
                yield subkey
                i += 1
            except WindowsError:  # pylint: disable=E0602
            except pywintypes.error:  # pylint: disable=E0602
                break

    def _traverse_registry_tree(_hkey, _keypath, _ret, _access_mask):
        '''
        Traverse the registry tree i.e. dive into the tree
        '''
        _key = _winreg.OpenKey(_hkey, _keypath, 0, _access_mask)
        _key = win32api.RegOpenKeyEx(_hkey, _keypath, 0, _access_mask)
        for subkeyname in _subkeys(_key):
            subkeypath = r'{0}\{1}'.format(_keypath, subkeyname)
            _ret = _traverse_registry_tree(_hkey, subkeypath, _ret, access_mask)

@ -681,8 +617,8 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
    # Delete all sub_keys
    for sub_key_path in key_list:
        try:
            key_handle = _winreg.OpenKey(hkey, sub_key_path, 0, access_mask)
            _winreg.DeleteKey(key_handle, '')
            key_handle = win32api.RegOpenKeyEx(hkey, sub_key_path, 0, access_mask)
            win32api.RegDeleteKey(key_handle, '')
            ret['Deleted'].append(r'{0}\{1}'.format(hive, sub_key_path))
        except WindowsError as exc:  # pylint: disable=E0602
            log.error(exc, exc_info=True)

@ -721,23 +657,18 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
        salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
    '''

    if PY2:
        local_hive = _mbcs_to_unicode(hive)
        local_key = _unicode_to_mbcs(key)
        local_vname = _unicode_to_mbcs(vname)
    else:
        local_hive = hive
        local_key = key
        local_vname = vname
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)
    local_vname = _to_unicode(vname)

    registry = Registry()
    hkey = registry.hkeys[local_hive]
    access_mask = registry.registry_32[use_32bit_registry] | _winreg.KEY_ALL_ACCESS
    access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS

    try:
        handle = _winreg.OpenKey(hkey, local_key, 0, access_mask)
        _winreg.DeleteValue(handle, local_vname)
        _winreg.CloseKey(handle)
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
        win32api.RegDeleteValue(handle, local_vname)
        win32api.RegCloseKey(handle)
        broadcast_change()
        return True
    except WindowsError as exc:  # pylint: disable=E0602
@ -20,6 +20,7 @@ import sys

# Import salt libs
import salt.utils.files
import salt.utils.path

HAS_PSUTIL = False
try:

@ -158,7 +159,7 @@ def _deleted_files():
            if os.path.isfile(readlink):
                filenames.append(readlink)
            elif os.path.isdir(readlink) and readlink != '/':
                for root, dummy_dirs, files in os.walk(readlink, followlinks=True):
                for root, dummy_dirs, files in salt.utils.path.os_walk(readlink, followlinks=True):
                    for name in files:
                        filenames.append(os.path.join(root, name))

@ -1120,8 +1120,8 @@ def build_routes(iface, **settings):
    log.debug("IPv4 routes:\n{0}".format(opts4))
    log.debug("IPv6 routes:\n{0}".format(opts6))

    routecfg = template.render(routes=opts4)
    routecfg6 = template.render(routes=opts6)
    routecfg = template.render(routes=opts4, iface=iface)
    routecfg6 = template.render(routes=opts6, iface=iface)

    if settings['test']:
        routes = _read_temp(routecfg)
@ -7,7 +7,7 @@ Wrapper for rsync

This data can also be passed into :ref:`pillar <pillar-walk-through>`.
Options passed into opts will overwrite options passed into pillar.
'''
from __future__ import absolute_import
from __future__ import absolute_import, unicode_literals

# Import python libs
import errno
@ -54,7 +54,8 @@ import time
from json import loads, dumps
import yaml
try:
    import salt.utils
    import salt.utils.files
    import salt.utils.path
    import salt.client
    import salt.exceptions
except ImportError:

@ -581,8 +582,8 @@ class StateTestLoader(object):
        log.info("gather_files: {}".format(time.time()))
        filepath = filepath + os.sep + 'saltcheck-tests'
        rootdir = filepath
        # for dirname, subdirlist, filelist in os.walk(rootdir):
        for dirname, dummy, filelist in os.walk(rootdir):
        # for dirname, subdirlist, filelist in salt.utils.path.os_walk(rootdir):
        for dirname, dummy, filelist in salt.utils.path.os_walk(rootdir):
            for fname in filelist:
                if fname.endswith('.tst'):
                    start_path = dirname + os.sep + fname

@ -612,8 +613,8 @@ class StateTestLoader(object):
            rootdir = full_path
            if os.path.isdir(full_path):
                log.info("searching path= {}".format(full_path))
                # for dirname, subdirlist, filelist in os.walk(rootdir, topdown=True):
                for dirname, subdirlist, dummy in os.walk(rootdir, topdown=True):
                # for dirname, subdirlist, filelist in salt.utils.path.os_walk(rootdir, topdown=True):
                for dirname, subdirlist, dummy in salt.utils.path.os_walk(rootdir, topdown=True):
                    if "saltcheck-tests" in subdirlist:
                        self.gather_files(dirname)
                        log.info("test_files list: {}".format(self.test_files))
@ -43,7 +43,7 @@ def create(name, profile):
    cmd = 'salt-cloud --out json -p {0} {1}'.format(profile, name)
    out = __salt__['cmd.run_stdout'](cmd, python_shell=False)
    try:
        ret = json.loads(out, object_hook=salt.utils.data.decode_dict)
        ret = json.loads(out, object_hook=salt.utils.data.encode_dict)
    except ValueError:
        ret = {}
    return ret
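``object_hook`` is applied by ``json.loads`` to every decoded JSON object, so swapping ``decode_dict`` for ``encode_dict`` changes how each dictionary is normalized in a single pass. An illustrative hook; ``normalize_dict`` is a hypothetical stand-in, not the real ``salt.utils.data.encode_dict``:

.. code-block:: python

    import json

    def normalize_dict(data):
        # called once for every JSON object as it is decoded
        return {key.strip(): value for key, value in data.items()}

    out = '{" name ": "web01", " profile ": "base"}'
    ret = json.loads(out, object_hook=normalize_dict)
    # {'name': 'web01', 'profile': 'base'}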
@ -52,6 +52,7 @@ import salt.utils.extmods
import salt.utils.files
import salt.utils.functools
import salt.utils.minion
import salt.utils.path
import salt.utils.process
import salt.utils.url
import salt.utils.versions

@ -753,7 +754,7 @@ def list_extmods():
    mod_types = os.listdir(ext_dir)
    for mod_type in mod_types:
        ret[mod_type] = set()
        for _, _, files in os.walk(os.path.join(ext_dir, mod_type)):
        for _, _, files in salt.utils.path.os_walk(os.path.join(ext_dir, mod_type)):
            for fh_ in files:
                ret[mod_type].add(fh_.split('.')[0])
        ret[mod_type] = list(ret[mod_type])
@ -59,6 +59,7 @@ SCHEDULE_CONF = [
    'return_kwargs',
    'run_on_start',
    'skip_during_range',
    'run_after_skip_range',
]


@ -100,8 +101,11 @@ def list_(show_all=False,
        log.debug('Event module not available. Schedule list failed.')
        return ret

    _hidden = ['enabled',
               'skip_function',
               'skip_during_range']
    for job in list(schedule.keys()):  # iterate over a copy since we will mutate it
        if job == 'enabled':
        if job in _hidden:
            continue

        # Default jobs added by salt begin with __
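Two small points in this hunk: iterating over ``list(schedule.keys())`` takes a snapshot so the loop can mutate the dict without a ``RuntimeError``, and the ``_hidden`` list collapses a chain of per-key comparisons into one membership test. A sketch exercising the same pattern with deletions:

.. code-block:: python

    schedule = {'enabled': True, 'skip_function': None, 'job1': {}}
    _hidden = ['enabled', 'skip_function', 'skip_during_range']

    for job in list(schedule.keys()):  # snapshot: safe to mutate below
        if job in _hidden:
            del schedule[job]

    # schedule is now {'job1': {}}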
@ -106,17 +106,16 @@ def _set_retcode(ret, highstate=None):
        __context__['retcode'] = 2


def _check_pillar(kwargs, pillar=None):
def _get_pillar_errors(kwargs, pillar=None):
    '''
    Check the pillar for errors, refuse to run the state if there are errors
    in the pillar and return the pillar errors
    Checks all pillars (external and internal) for errors.
    Returns an error message if there are any, otherwise None.

    :param kwargs: dictionary of options
    :param pillar: external pillar
    :return: None or an error message
    '''
    if kwargs.get('force'):
        return True
    pillar_dict = pillar if pillar is not None else __pillar__
    if '_errors' in pillar_dict:
        return False
    return True
    return None if kwargs.get('force') else (pillar or {}).get('_errors', __pillar__.get('_errors')) or None
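The new one-line return packs several cases together; expanded long-hand it reads as below (``__pillar__`` stands for the minion's in-memory pillar, stubbed here so the sketch runs standalone):

.. code-block:: python

    __pillar__ = {'_errors': ['rendering error']}  # stub for illustration

    def _get_pillar_errors(kwargs, pillar=None):
        if kwargs.get('force'):
            return None
        # prefer the explicitly passed pillar, fall back to the global one
        errors = (pillar or {}).get('_errors', __pillar__.get('_errors'))
        return errors or None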
def _wait(jid):

@ -170,7 +169,7 @@ def _get_pause(jid, state_id=None):
    '''
    Return the pause information for a given jid
    '''
    pause_dir = os.path.join(__opts__[u'cachedir'], 'state_pause')
    pause_dir = os.path.join(__opts__['cachedir'], 'state_pause')
    pause_path = os.path.join(pause_dir, jid)
    if not os.path.exists(pause_dir):
        try:

@ -197,7 +196,7 @@ def get_pauses(jid=None):
    '''
    ret = {}
    active = __salt__['saltutil.is_running']('state.*')
    pause_dir = os.path.join(__opts__[u'cachedir'], 'state_pause')
    pause_dir = os.path.join(__opts__['cachedir'], 'state_pause')
    if not os.path.exists(pause_dir):
        return ret
    if jid is None:

@ -585,10 +584,10 @@ def template(tem, queue=False, **kwargs):
                             context=__context__,
                             initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        raise CommandExecutionError('Pillar failed to render',
                                    info=st_.opts['pillar']['_errors'])
        raise CommandExecutionError('Pillar failed to render', info=errors)

    if not tem.endswith('.sls'):
        tem = '{sls}.sls'.format(sls=tem)

@ -667,6 +666,18 @@ def apply_(mods=None,
        Values passed this way will override Pillar values set via
        ``pillar_roots`` or an external Pillar source.

    exclude
        Exclude specific states from execution. Accepts a list of sls names, a
        comma-separated string of sls names, or a list of dictionaries
        containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
        multiple states.

        .. code-block:: bash

            salt '*' state.apply exclude=bar,baz
            salt '*' state.apply exclude=foo*
            salt '*' state.apply exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"

    queue : False
        Instead of failing immediately when another state run is in progress,
        queue the new state run to begin running once the other has finished.

@ -932,6 +943,18 @@ def highstate(test=None, queue=False, **kwargs):

        .. versionadded:: 2016.3.0

    exclude
        Exclude specific states from execution. Accepts a list of sls names, a
        comma-separated string of sls names, or a list of dictionaries
        containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
        multiple states.

        .. code-block:: bash

            salt '*' state.highstate exclude=bar,baz
            salt '*' state.highstate exclude=foo*
            salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"

    saltenv
        Specify a salt fileserver environment to be used when applying states

@ -1041,11 +1064,10 @@ def highstate(test=None, queue=False, **kwargs):
                            mocked=kwargs.get('mock', False),
                            initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        err = ['Pillar failed to render with the following messages:']
        err += __pillar__['_errors']
        return err
        return ['Pillar failed to render with the following messages:'] + errors

    st_.push_active()
    ret = {}

@ -1105,6 +1127,18 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):

        .. versionadded:: 2016.3.0

    exclude
        Exclude specific states from execution. Accepts a list of sls names, a
        comma-separated string of sls names, or a list of dictionaries
        containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
        multiple states.

        .. code-block:: bash

            salt '*' state.sls foo,bar,baz exclude=bar,baz
            salt '*' state.sls foo,bar,baz exclude=ba*
            salt '*' state.sls foo,bar,baz exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"

    queue : False
        Instead of failing immediately when another state run is in progress,
        queue the new state run to begin running once the other has finished.

@ -1236,11 +1270,10 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
                           mocked=kwargs.get('mock', False),
                           initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        err = ['Pillar failed to render with the following messages:']
        err += __pillar__['_errors']
        return err
        return ['Pillar failed to render with the following messages:'] + errors

    orchestration_jid = kwargs.get('orchestration_jid')
    umask = os.umask(0o77)

@ -1255,7 +1288,6 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
        mods = mods.split(',')

    st_.push_active()
    ret = {}
    try:
        high_, errors = st_.render_highstate({opts['saltenv']: mods})

@ -1369,11 +1401,10 @@ def top(topfn, test=None, queue=False, **kwargs):
                               pillar_enc=pillar_enc,
                               context=__context__,
                               initial_pillar=_get_initial_pillar(opts))
    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        err = ['Pillar failed to render with the following messages:']
        err += __pillar__['_errors']
        return err
        return ['Pillar failed to render with the following messages:'] + errors

    st_.push_active()
    st_.opts['state_top'] = salt.utils.url.create(topfn)

@ -1437,10 +1468,10 @@ def show_highstate(queue=False, **kwargs):
                               pillar_enc=pillar_enc,
                               initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        raise CommandExecutionError('Pillar failed to render',
                                    info=st_.opts['pillar']['_errors'])
        raise CommandExecutionError('Pillar failed to render', info=errors)

    st_.push_active()
    try:

@ -1475,10 +1506,10 @@ def show_lowstate(queue=False, **kwargs):
        st_ = salt.state.HighState(opts,
                                   initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        raise CommandExecutionError('Pillar failed to render',
                                    info=st_.opts['pillar']['_errors'])
        raise CommandExecutionError('Pillar failed to render', info=errors)

    st_.push_active()
    try:

@ -1604,11 +1635,10 @@ def sls_id(id_, mods, test=None, queue=False, **kwargs):
                           pillar_enc=pillar_enc,
                           initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        err = ['Pillar failed to render with the following messages:']
        err += __pillar__['_errors']
        return err
        return ['Pillar failed to render with the following messages:'] + errors

    if isinstance(mods, six.string_types):
        split_mods = mods.split(',')

@ -1714,10 +1744,10 @@ def show_low_sls(mods, test=None, queue=False, **kwargs):
                           pillar_override,
                           initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        raise CommandExecutionError('Pillar failed to render',
                                    info=st_.opts['pillar']['_errors'])
        raise CommandExecutionError('Pillar failed to render', info=errors)

    if isinstance(mods, six.string_types):
        mods = mods.split(',')

@ -1803,10 +1833,10 @@ def show_sls(mods, test=None, queue=False, **kwargs):
                           pillar_enc=pillar_enc,
                           initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        raise CommandExecutionError('Pillar failed to render',
                                    info=st_.opts['pillar']['_errors'])
        raise CommandExecutionError('Pillar failed to render', info=errors)

    if isinstance(mods, six.string_types):
        mods = mods.split(',')

@ -1851,10 +1881,10 @@ def show_top(queue=False, **kwargs):
    except NameError:
        st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))

    if not _check_pillar(kwargs, st_.opts['pillar']):
    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = 5
        raise CommandExecutionError('Pillar failed to render',
                                    info=st_.opts['pillar']['_errors'])
        raise CommandExecutionError('Pillar failed to render', info=errors)

    errors = []
    top_ = st_.get_top()

@ -1926,7 +1956,7 @@ def single(fun, name, test=None, queue=False, **kwargs):

    st_._mod_init(kwargs)
    snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called localy'))
    ret = {u'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs):
    ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs):
            st_.call(kwargs)}
    _set_retcode(ret)
    # Work around Windows multiprocessing bug, set __opts__['test'] back to

@ -1996,7 +2026,7 @@ def pkg(pkg_path,
    s_pkg.close()
    lowstate_json = os.path.join(root, 'lowstate.json')
    with salt.utils.files.fopen(lowstate_json, 'r') as fp_:
        lowstate = json.load(fp_, object_hook=salt.utils.data.decode_dict)
        lowstate = json.load(fp_, object_hook=salt.utils.data.encode_dict)
    # Check for errors in the lowstate
    for chunk in lowstate:
        if not isinstance(chunk, dict):

@ -2011,7 +2041,7 @@ def pkg(pkg_path,
    roster_grains_json = os.path.join(root, 'roster_grains.json')
    if os.path.isfile(roster_grains_json):
        with salt.utils.files.fopen(roster_grains_json, 'r') as fp_:
            roster_grains = json.load(fp_, object_hook=salt.utils.data.decode_dict)
            roster_grains = json.load(fp_, object_hook=salt.utils.data.encode_dict)

    if os.path.isfile(roster_grains_json):
        popts['grains'] = roster_grains
@ -299,6 +299,9 @@ def cpustats():
    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: Oxygen
        Added support for OpenBSD

    CLI Example:

    .. code-block:: bash

@ -407,10 +410,28 @@ def cpustats():

        return ret

    def openbsd_cpustats():
        '''
        openbsd specific implementation of cpustats
        '''
        systat = __salt__['cmd.run']('systat -s 2 -B cpu').splitlines()
        fields = systat[3].split()
        ret = {}
        for cpu in systat[4:]:
            cpu_line = cpu.split()
            cpu_idx = cpu_line[0]
            ret[cpu_idx] = {}

            for idx, field in enumerate(fields[1:]):
                ret[cpu_idx][field] = cpu_line[idx+1]

        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_cpustats,
        'FreeBSD': freebsd_cpustats,
        'OpenBSD': openbsd_cpustats,
        'SunOS': sunos_cpustats,
        'AIX': aix_cpustats,
    }
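This ``get_version`` mapping is the dispatch idiom used throughout ``status.py``: index the kernel grain into a dict of implementations and fall back to a lambda returning an error string. Reduced to its essentials, with stub implementations:

.. code-block:: python

    def linux_cpustats():
        return {'cpu0': {}}

    def openbsd_cpustats():
        return {'0': {}}

    get_version = {
        'Linux': linux_cpustats,
        'OpenBSD': openbsd_cpustats,
    }
    errmsg = 'This method is unsupported on the current operating system!'
    kernel = 'OpenBSD'  # normally __grains__['kernel']
    result = get_version.get(kernel, lambda: errmsg)()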
@ -426,6 +447,9 @@ def meminfo():
    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: Oxygen
        Added support for OpenBSD

    CLI Example:

    .. code-block:: bash

@ -553,10 +577,25 @@ def meminfo():

        return ret

    def openbsd_meminfo():
        '''
        openbsd specific implementation of meminfo
        '''
        vmstat = __salt__['cmd.run']('vmstat').splitlines()
        # We're only interested in memory and page values which are printed
        # as subsequent fields.
        fields = ['active virtual pages', 'free list size', 'page faults',
                  'pages reclaimed', 'pages paged in', 'pages paged out',
                  'pages freed', 'pages scanned']
        data = vmstat[2].split()[2:10]
        ret = dict(zip(fields, data))
        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_meminfo,
        'FreeBSD': freebsd_meminfo,
        'OpenBSD': openbsd_meminfo,
        'AIX': aix_meminfo,
    }

@ -572,6 +611,9 @@ def cpuinfo():
    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: Oxygen
        Added support for NetBSD and OpenBSD

    CLI Example:

    .. code-block:: bash

@ -602,14 +644,19 @@ def cpuinfo():

    def bsd_cpuinfo():
        '''
        freebsd specific cpuinfo implementation
        bsd specific cpuinfo implementation
        '''
        freebsd_cmd = 'sysctl hw.model hw.ncpu'
        bsd_cmd = 'sysctl hw.model hw.ncpu'
        ret = {}
        for line in __salt__['cmd.run'](freebsd_cmd).splitlines():
        if __grains__['kernel'].lower() in ['netbsd', 'openbsd']:
            sep = '='
        else:
            sep = ':'

        for line in __salt__['cmd.run'](bsd_cmd).splitlines():
            if not line:
                continue
            comps = line.split(':')
            comps = line.split(sep)
            comps[0] = comps[0].strip()
            ret[comps[0]] = comps[1].strip()
        return ret
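The separator switch exists because FreeBSD prints ``hw.model: ...`` while NetBSD and OpenBSD print ``hw.model=...``. The parsing can be exercised with canned strings (sample output, not live ``sysctl``):

.. code-block:: python

    def parse_sysctl(output, kernel):
        sep = '=' if kernel.lower() in ('netbsd', 'openbsd') else ':'
        ret = {}
        for line in output.splitlines():
            if not line:
                continue
            comps = line.split(sep, 1)
            ret[comps[0].strip()] = comps[1].strip()
        return ret

    print(parse_sysctl('hw.model=AMD EPYC 7302\nhw.ncpu=16', 'OpenBSD'))
    # {'hw.model': 'AMD EPYC 7302', 'hw.ncpu': '16'}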
@ -754,6 +801,7 @@ def cpuinfo():
    get_version = {
        'Linux': linux_cpuinfo,
        'FreeBSD': bsd_cpuinfo,
        'NetBSD': bsd_cpuinfo,
        'OpenBSD': bsd_cpuinfo,
        'SunOS': sunos_cpuinfo,
        'AIX': aix_cpuinfo,

@ -1032,19 +1080,46 @@ def nproc():
    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: Oxygen
        Added support for Darwin, FreeBSD and OpenBSD

    CLI Example:

    .. code-block:: bash

        salt '*' status.nproc
    '''
    if __grains__['kernel'] == 'AIX':
        return _aix_nproc()
    def linux_nproc():
        '''
        linux specific implementation of nproc
        '''
        try:
            return _number(__salt__['cmd.run']('nproc').strip())
        except ValueError:
            return 0

    try:
        return _number(__salt__['cmd.run']('nproc').strip())
    except ValueError:
        return 0
    def generic_nproc():
        '''
        generic implementation of nproc
        '''
        ncpu_data = __salt__['sysctl.get']('hw.ncpu')
        if not ncpu_data:
            # We need at least one CPU to run
            return 1
        else:
            return _number(ncpu_data)

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_nproc,
        'Darwin': generic_nproc,
        'FreeBSD': generic_nproc,
        'OpenBSD': generic_nproc,
        'AIX': _aix_nproc,
    }

    errmsg = 'This method is unsupported on the current operating system!'
    return get_version.get(__grains__['kernel'], lambda: errmsg)()


def netstats():

@ -1054,6 +1129,9 @@ def netstats():
    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: Oxygen
        Added support for OpenBSD

    CLI Example:

    .. code-block:: bash

@ -1091,15 +1169,18 @@ def netstats():
        return ret

    def freebsd_netstats():
        return bsd_netstats()

    def bsd_netstats():
        '''
        freebsd specific netstats implementation
        bsd specific netstats implementation
        '''
        ret = {}
        for line in __salt__['cmd.run']('netstat -s').splitlines():
            if line.startswith('\t\t'):
                continue  # Skip, too detailed
            if not line.startswith('\t'):
                key = line.split()[0]
                key = line.split()[0].replace(':', '')
                ret[key] = {}
            else:
                comps = line.split()

@ -1159,7 +1240,8 @@ def netstats():
    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_netstats,
        'FreeBSD': freebsd_netstats,
        'FreeBSD': bsd_netstats,
        'OpenBSD': bsd_netstats,
        'SunOS': sunos_netstats,
        'AIX': aix_netstats,
    }

@ -1472,6 +1554,9 @@ def version():
    .. versionchanged:: 2016.11.4
        Added support for AIX

    .. versionchanged:: Oxygen
        Added support for OpenBSD

    CLI Example:

    .. code-block:: bash

@ -1488,10 +1573,17 @@ def version():
        except IOError:
            return {}

    def bsd_version():
        '''
        bsd specific implementation of version
        '''
        return __salt__['cmd.run']('sysctl -n kern.version')

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_version,
        'FreeBSD': lambda: __salt__['cmd.run']('sysctl -n kern.version'),
        'FreeBSD': bsd_version,
        'OpenBSD': bsd_version,
        'AIX': lambda: __salt__['cmd.run']('oslevel -s'),
    }

@ -13,6 +13,7 @@ import stat

# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform

# Import 3rd-party libs

@ -231,7 +232,7 @@ def interfaces(root):
    reads = []
    writes = []

    for path, _, files in os.walk(root, followlinks=False):
    for path, _, files in salt.utils.path.os_walk(root, followlinks=False):
        for afile in files:
            canpath = os.path.join(path, afile)

@ -120,7 +120,7 @@ def _get_zone_etc_localtime():
    hash_type = __opts__.get('hash_type', 'md5')
    tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
    # Not a link, just a copy of the tzdata file
    for root, dirs, files in os.walk(tzdir):
    for root, dirs, files in salt.utils.path.os_walk(tzdir):
        for filename in files:
            full_path = os.path.join(root, filename)
            olson_name = full_path[tzdir_len:]
@ -241,7 +241,7 @@ def _iter_service_names():
    # is named rc-sysinit, while a configuration file /etc/init/net/apache.conf
    # is named net/apache'
    init_root = '/etc/init/'
    for root, dirnames, filenames in os.walk(init_root):
    for root, dirnames, filenames in salt.utils.path.os_walk(init_root):
        relpath = os.path.relpath(root, init_root)
        for filename in fnmatch.filter(filenames, '*.conf'):
            if relpath == '.':
@ -317,7 +317,7 @@ def get_site_packages(venv):
    ret = __salt__['cmd.exec_code_all'](
        bin_path,
        'from distutils import sysconfig; '
        'print sysconfig.get_python_lib()'
        'print(sysconfig.get_python_lib())'
    )

    if ret['retcode'] != 0:
@ -59,7 +59,7 @@ from salt.modules.file import (check_hash,  # pylint: disable=W0611
    lstat, path_exists_glob, write, pardir, join, HASHES, HASHES_REVMAP,
    comment, uncomment, _add_flags, comment_line, _regex_to_static,
    _get_line_indent, apply_template_on_contents, dirname, basename,
    list_backups_dir)
    list_backups_dir, _assert_occurrence, _starts_till)
from salt.modules.file import normpath as normpath_

from salt.utils.functools import namespaced_function as _namespaced_function

@ -117,7 +117,7 @@ def __virtual__():
        global write, pardir, join, _add_flags, apply_template_on_contents
        global path_exists_glob, comment, uncomment, _mkstemp_copy
        global _regex_to_static, _get_line_indent, dirname, basename
        global list_backups_dir, normpath_
        global list_backups_dir, normpath_, _assert_occurrence, _starts_till

        replace = _namespaced_function(replace, globals())
        search = _namespaced_function(search, globals())

@ -180,6 +180,8 @@ def __virtual__():
        basename = _namespaced_function(basename, globals())
        list_backups_dir = _namespaced_function(list_backups_dir, globals())
        normpath_ = _namespaced_function(normpath_, globals())
        _assert_occurrence = _namespaced_function(_assert_occurrence, globals())
        _starts_till = _namespaced_function(_starts_till, globals())

    else:
        return False, 'Module win_file: Missing Win32 modules'
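``_namespaced_function`` re-binds a function borrowed from ``file.py`` so that its global lookups resolve inside ``win_file.py``, which is why each newly imported helper needs both a ``global`` declaration and a re-bind above. A rough sketch of the mechanism, simplified from ``salt.utils.functools.namespaced_function``:

.. code-block:: python

    import types

    def namespaced_function(function, global_dict):
        # rebuild the function object around the caller's globals so
        # names inside the body resolve in the new module
        return types.FunctionType(
            function.__code__, global_dict, function.__name__,
            function.__defaults__, function.__closure__)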
@ -790,7 +792,7 @@ def chgrp(path, group):

def stats(path, hash_type='sha256', follow_symlinks=True):
    '''
    Return a dict containing the stats for a given file
    Return a dict containing the stats about a given file

    Under Windows, `gid` will equal `uid` and `group` will equal `user`.

@ -819,6 +821,8 @@ def stats(path, hash_type='sha256', follow_symlinks=True):

        salt '*' file.stats /etc/passwd
    '''
    # This is to mirror the behavior of file.py. `check_file_meta` expects an
    # empty dictionary when the file does not exist
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))

@ -1344,7 +1348,7 @@ def makedirs_(path,

    owner (str):
        The owner of the directory. If not passed, it will be the account
        that created the directly, likely SYSTEM
        that created the directory, likely SYSTEM.

    grant_perms (dict):
        A dictionary containing the user/group and the basic permissions to

@ -1466,7 +1470,7 @@ def makedirs_perms(path,

    owner (str):
        The owner of the directory. If not passed, it will be the account
        that created the directory, likely SYSTEM
        that created the directory, likely SYSTEM.

    grant_perms (dict):
        A dictionary containing the user/group and the basic permissions to

@ -1504,7 +1508,7 @@ def makedirs_perms(path,
        .. versionadded:: Oxygen

    Returns:
        bool: True if successful, otherwise raise an error
        bool: True if successful, otherwise raises an error

    CLI Example:

@ -1620,6 +1624,9 @@ def check_perms(path,
    # Specify advanced attributes with a list
    salt '*' file.check_perms C:\\Temp\\ {} Administrators "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'files_only'}}"
    '''
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))

    path = os.path.expanduser(path)

    if not ret:
@ -52,6 +52,7 @@ import time
from salt.exceptions import CommandExecutionError, SaltInvocationError
import salt.utils.dictupdate as dictupdate
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils

@ -619,8 +620,8 @@ class _policy_info(object):
                },
            },
            'RemoteRegistryExactPaths': {
                'Policy': 'Network access: Remotely accessible registry '
                          'paths',
                'Policy': 'Network access: Remotely accessible '
                          'registry paths',
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {
                    'Hive': 'HKEY_LOCAL_MACHINE',

@ -632,8 +633,8 @@ class _policy_info(object):
                },
            },
            'RemoteRegistryPaths': {
                'Policy': 'Network access: Remotely accessible registry '
                          'paths and sub-paths',
                'Policy': 'Network access: Remotely accessible '
                          'registry paths and sub-paths',
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {
                    'Hive': 'HKEY_LOCAL_MACHINE',

@ -644,8 +645,8 @@ class _policy_info(object):
                },
            },
            'RestrictNullSessAccess': {
                'Policy': 'Network access: Restrict anonymous access to '
                          'Named Pipes and Shares',
                'Policy': 'Network access: Restrict anonymous access '
                          'to Named Pipes and Shares',
                'lgpo_section': self.security_options_gpedit_path,
                'Settings': self.enabled_one_disabled_zero.keys(),
                'Registry': {

@ -898,9 +899,9 @@ class _policy_info(object):
                'Transform': self.enabled_one_disabled_zero_transform,
            },
            'CachedLogonsCount': {
                'Policy': 'Interactive logon: Number of previous logons '
                          'to cache (in case domain controller is not '
                          'available)',
                'Policy': 'Interactive logon: Number of previous '
                          'logons to cache (in case domain controller '
                          'is not available)',
                'Settings': {
                    'Function': '_in_range_inclusive',
                    'Args': {'min': 0, 'max': 50}

@ -915,8 +916,9 @@ class _policy_info(object):
                },
            },
            'ForceUnlockLogon': {
                'Policy': 'Interactive logon: Require Domain Controller '
                          'authentication to unlock workstation',
                'Policy': 'Interactive logon: Require Domain '
                          'Controller authentication to unlock '
                          'workstation',
                'Settings': self.enabled_one_disabled_zero.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -983,8 +985,8 @@ class _policy_info(object):
            },
            'EnableUIADesktopToggle': {
                'Policy': 'User Account Control: Allow UIAccess '
                          'applications to prompt for elevation without '
                          'using the secure desktop',
                          'applications to prompt for elevation '
                          'without using the secure desktop',
                'Settings': self.enabled_one_disabled_zero.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -998,8 +1000,8 @@ class _policy_info(object):
            },
            'ConsentPromptBehaviorAdmin': {
                'Policy': 'User Account Control: Behavior of the '
                          'elevation prompt for administrators in Admin '
                          'Approval Mode',
                          'elevation prompt for administrators in '
                          'Admin Approval Mode',
                'Settings': self.uac_admin_prompt_lookup.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -1077,7 +1079,7 @@ class _policy_info(object):
            },
            'EnableSecureUIAPaths': {
                'Policy': 'User Account Control: Only elevate UIAccess '
                          'applicaitons that are installed in secure '
                          'applications that are installed in secure '
                          'locations',
                'Settings': self.enabled_one_disabled_zero.keys(),
                'lgpo_section': self.security_options_gpedit_path,

@ -1091,8 +1093,8 @@ class _policy_info(object):
                'Transform': self.enabled_one_disabled_zero_transform,
            },
            'EnableLUA': {
                'Policy': 'User Account Control: Run all administrators '
                          'in Admin Approval Mode',
                'Policy': 'User Account Control: Run all '
                          'administrators in Admin Approval Mode',
                'Settings': self.enabled_one_disabled_zero.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -1354,8 +1356,8 @@ class _policy_info(object):
                'Transform': self.enabled_one_disabled_zero_transform,
            },
            'EnableForcedLogoff': {
                'Policy': 'Microsoft network server: Disconnect clients '
                          'when logon hours expire',
                'Policy': 'Microsoft network server: Disconnect '
                          'clients when logon hours expire',
                'Settings': self.enabled_one_disabled_zero.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -1422,7 +1424,8 @@ class _policy_info(object):
                'Transform': self.enabled_one_disabled_zero_transform,
            },
            'UndockWithoutLogon': {
                'Policy': 'Devices: Allow undock without having to log on',
                'Policy': 'Devices: Allow undock without having to log '
                          'on',
                'Settings': self.enabled_one_disabled_zero.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -1497,8 +1500,8 @@ class _policy_info(object):
                },
            },
            'SubmitControl': {
                'Policy': 'Domain controller: Allow server operators to '
                          'schedule tasks',
                'Policy': 'Domain controller: Allow server operators '
                          'to schedule tasks',
                'Settings': self.enabled_one_disabled_zero_strings.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -1577,8 +1580,8 @@ class _policy_info(object):
                'Transform': self.enabled_one_disabled_zero_strings_transform,
            },
            'SignSecureChannel': {
                'Policy': 'Domain member: Digitally sign secure channel '
                          'data (when possible)',
                'Policy': 'Domain member: Digitally sign secure '
                          'channel data (when possible)',
                'Settings': self.enabled_one_disabled_zero_strings.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -2301,7 +2304,7 @@ class _policy_info(object):
            },
            'RecoveryConsoleSecurityLevel': {
                'Policy': 'Recovery console: Allow automatic '
                          'adminstrative logon',
                          'administrative logon',
                'Settings': self.enabled_one_disabled_zero.keys(),
                'lgpo_section': self.security_options_gpedit_path,
                'Registry': {

@ -2433,15 +2436,18 @@ class _policy_info(object):
        '''
        converts a binary 0/1 to Disabled/Enabled
        '''
        if val is not None:
            if ord(val) == 0:
                return 'Disabled'
            elif ord(val) == 1:
                return 'Enabled'
        try:
            if val is not None:
                if ord(val) == 0:
                    return 'Disabled'
                elif ord(val) == 1:
                    return 'Enabled'
                else:
                    return 'Invalid Value'
        else:
            return 'Invalid Value'
        else:
            return 'Not Defined'
                return 'Not Defined'
        except TypeError:
            return 'Invalid Value'

    @classmethod
    def _binary_enable_zero_disable_one_reverse_conversion(cls, val, **kwargs):
|
||||
policydefs_resources_localname_xpath = etree.XPath(
|
||||
'//*[local-name() = "policyDefinitionResources"]/*')
|
||||
policydef_resources_xpath = etree.XPath('/policyDefinitionResources')
|
||||
for root, dirs, files in os.walk(policy_def_path):
|
||||
for root, dirs, files in salt.utils.path.os_walk(policy_def_path):
|
||||
if root == policy_def_path:
|
||||
for t_admfile in files:
|
||||
admfile = os.path.join(root, t_admfile)
|
||||
@ -3502,7 +3508,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element,
|
||||
reg_key,
|
||||
reg_valuename,
|
||||
chr(registry.vtype[this_vtype]),
|
||||
six.unichr(len(this_element_value.encode('utf-16-le'))),
|
||||
six.unichr(len(this_element_value.encode('utf-16-le', '' if six.PY2 else 'surrogatepass'))),
|
||||
this_element_value)
|
||||
return expected_string
|
||||
|
||||
@ -4242,8 +4248,8 @@ def _writeAdminTemplateRegPolFile(admtemplate_data,
|
||||
for adm_namespace in admtemplate_data:
|
||||
for adm_policy in admtemplate_data[adm_namespace]:
|
||||
if str(admtemplate_data[adm_namespace][adm_policy]).lower() == 'not configured':
|
||||
if adm_policy in base_policy_settings[adm_namespace]:
|
||||
base_policy_settings[adm_namespace].pop(adm_policy)
|
||||
if base_policy_settings.get(adm_namespace, {}).pop(adm_policy, None) is not None:
|
||||
log.debug('Policy "{0}" removed'.format(adm_policy))
|
||||
else:
|
||||
log.debug('adding {0} to base_policy_settings'.format(adm_policy))
|
||||
if adm_namespace not in base_policy_settings:
|
||||
|
@ -39,6 +39,7 @@ import logging
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import sys
|
||||
from functools import cmp_to_key
|
||||
|
||||
# Import third party libs
|
||||
@ -104,7 +105,7 @@ def latest_version(*names, **kwargs):
|
||||
salt '*' pkg.latest_version <package name>
|
||||
salt '*' pkg.latest_version <package1> <package2> <package3> ...
|
||||
'''
|
||||
if len(names) == 0:
|
||||
if not names:
|
||||
return ''
|
||||
|
||||
# Initialize the return dict with empty strings
|
||||
@ -129,6 +130,8 @@ def latest_version(*names, **kwargs):
|
||||
if name in installed_pkgs:
|
||||
log.trace('Determining latest installed version of %s', name)
|
||||
try:
|
||||
# installed_pkgs[name] Can be version number or 'Not Found'
|
||||
# 'Not Found' occurs when version number is not found in the registry
|
||||
latest_installed = sorted(
|
||||
installed_pkgs[name],
|
||||
key=cmp_to_key(_reverse_cmp_pkg_versions)
|
||||
@ -145,6 +148,8 @@ def latest_version(*names, **kwargs):
|
||||
# get latest available (from winrepo_dir) version of package
|
||||
pkg_info = _get_package_info(name, saltenv=saltenv)
|
||||
log.trace('Raw winrepo pkg_info for {0} is {1}'.format(name, pkg_info))
|
||||
|
||||
# latest_available can be version number or 'latest' or even 'Not Found'
|
||||
latest_available = _get_latest_pkg_version(pkg_info)
|
||||
if latest_available:
|
||||
log.debug('Latest available version '
|
||||
@ -152,9 +157,9 @@ def latest_version(*names, **kwargs):
|
||||
|
||||
# check, whether latest available version
|
||||
# is newer than latest installed version
|
||||
if salt.utils.versions.compare(ver1=str(latest_available),
|
||||
oper='>',
|
||||
ver2=str(latest_installed)):
|
||||
if compare_versions(ver1=str(latest_available),
|
||||
oper='>',
|
||||
ver2=str(latest_installed)):
|
||||
log.debug('Upgrade of {0} from {1} to {2} '
|
||||
'is available'.format(name,
|
||||
latest_installed,
|
||||
@ -193,10 +198,9 @@ def upgrade_available(name, **kwargs):
|
||||
# same default as latest_version
|
||||
refresh = salt.utils.data.is_true(kwargs.get('refresh', True))
|
||||
|
||||
current = version(name, saltenv=saltenv, refresh=refresh).get(name)
|
||||
latest = latest_version(name, saltenv=saltenv, refresh=False)
|
||||
|
||||
return compare_versions(latest, '>', current)
|
||||
# if latest_version returns blank, the latest version is already installed or
|
||||
# their is no package definition. This is a salt standard which could be improved.
|
||||
return latest_version(name, saltenv=saltenv, refresh=refresh) != ''
|
||||
|
||||
|
||||
def list_upgrades(refresh=True, **kwargs):
|
||||
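The rewritten ``upgrade_available`` leans on the convention the diff's comment calls out: ``latest_version`` returns an empty string when the installed version is already current (or no package definition exists). A hedged sketch of that contract with stand-in data in place of the real winrepo lookup (all names here are illustrative stubs, and the naive string comparison assumes well-formed versions; the real code compares version-aware):

.. code-block:: python

    def latest_version_stub(name, installed, available):
        # Newest available version only when newer than what is installed;
        # empty string otherwise (the salt convention referenced above).
        if name not in available:
            return ''
        if name in installed and available[name] <= installed[name]:
            return ''
        return available[name]


    def upgrade_available_stub(name, installed, available):
        # Mirrors the new one-liner: any non-empty result means an upgrade.
        return latest_version_stub(name, installed, available) != ''


    print(upgrade_available_stub('7zip', {'7zip': '18.05'}, {'7zip': '19.00'}))  # True
    print(upgrade_available_stub('7zip', {'7zip': '19.00'}, {'7zip': '19.00'}))  # False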
@ -227,9 +231,13 @@ def list_upgrades(refresh=True, **kwargs):
pkgs = {}
for pkg in installed_pkgs:
if pkg in available_pkgs:
# latest_version() will be blank if the latest version is installed.
# or the package name is wrong. Given we check available_pkgs, this
# should not be the case of wrong package name.
# Note: latest_version() is an expensive way to do this as it
# calls list_pkgs each time.
latest_ver = latest_version(pkg, refresh=False, saltenv=saltenv)
install_ver = installed_pkgs[pkg]
if compare_versions(latest_ver, '>', install_ver):
if latest_ver:
pkgs[pkg] = latest_ver

return pkgs
@ -246,7 +254,7 @@ def list_available(*names, **kwargs):

saltenv (str): The salt environment to use. Default ``base``.

refresh (bool): Refresh package metadata. Default ``True``.
refresh (bool): Refresh package metadata. Default ``False``.

return_dict_always (bool):
Default ``False`` dict when a single package name is queried.
@ -269,11 +277,10 @@ def list_available(*names, **kwargs):
return ''

saltenv = kwargs.get('saltenv', 'base')
refresh = salt.utils.data.is_true(kwargs.get('refresh', True))
return_dict_always = \
salt.utils.data.is_true(kwargs.get('return_dict_always', False))

refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
_refresh_db_conditional(saltenv, force=refresh)
return_dict_always = \
salt.utils.is_true(kwargs.get('return_dict_always', False))
if len(names) == 1 and not return_dict_always:
pkginfo = _get_package_info(names[0], saltenv=saltenv)
if not pkginfo:
@ -298,7 +305,9 @@ def list_available(*names, **kwargs):

def version(*names, **kwargs):
'''
Returns a version if the package is installed, else returns an empty string
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.

Args:
name (str): One or more package names
@ -308,10 +317,11 @@ def version(*names, **kwargs):
refresh (bool): Refresh package metadata. Default ``False``.

Returns:
str: version string when a single package is specified.
dict: The package name(s) with the installed versions.

.. code-block:: cfg

{['<version>', '<version>', ]} OR
{'<package name>': ['<version>', '<version>', ]}

CLI Example:
@ -320,19 +330,25 @@ def version(*names, **kwargs):

salt '*' pkg.version <package name>
salt '*' pkg.version <package name01> <package name02>
'''
saltenv = kwargs.get('saltenv', 'base')

installed_pkgs = list_pkgs(refresh=kwargs.get('refresh', False))
available_pkgs = get_repo_data(saltenv).get('repo')
'''
# Standard is return empty string even if not a valid name
# TODO: Look at returning an error across all platforms with
# CommandExecutionError(msg,info={'errors': errors })
# available_pkgs = get_repo_data(saltenv).get('repo')
# for name in names:
# if name in available_pkgs:
# ret[name] = installed_pkgs.get(name, '')

saltenv = kwargs.get('saltenv', 'base')
installed_pkgs = list_pkgs(saltenv=saltenv, refresh=kwargs.get('refresh', False))

if len(names) == 1:
return installed_pkgs.get(names[0], '')

ret = {}
for name in names:
if name in available_pkgs:
ret[name] = installed_pkgs.get(name, '')
else:
ret[name] = 'not available'

ret[name] = installed_pkgs.get(name, '')
return ret


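The reworked ``version`` reduces to a dictionary lookup over the ``list_pkgs`` result, and its return shape matches the new docstring: a bare string for one name, a dict for several. A small stand-in showing that shape; ``installed`` here is fake data replacing the real registry-backed ``list_pkgs`` call:

.. code-block:: python

    def version_stub(*names):
        # Fake list_pkgs() result; the real function reads installed
        # software out of the Windows registry.
        installed = {'git': '2.14.2', 'nsis': '3.02.1'}

        if len(names) == 1:
            # Single name: bare version string, '' when not installed.
            return installed.get(names[0], '')
        # Multiple names: name -> version dict, '' for anything missing.
        return {name: installed.get(name, '') for name in names}


    print(version_stub('git'))           # '2.14.2'
    print(version_stub('git', 'putty'))  # {'git': '2.14.2', 'putty': ''}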
@ -429,7 +445,7 @@ def _get_reg_software():
'(value not set)',
'',
None]
#encoding = locale.getpreferredencoding()

reg_software = {}

hive = 'HKLM'
@ -467,7 +483,7 @@ def _get_reg_software():
def _refresh_db_conditional(saltenv, **kwargs):
'''
Internal use only in this module, has a different set of defaults and
returns True or False. And supports check the age of the existing
returns True or False. And supports checking the age of the existing
generated metadata db, as well as ensure metadata db exists to begin with

Args:
@ -481,8 +497,7 @@ def _refresh_db_conditional(saltenv, **kwargs):

failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed to
process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
process.

Returns:
bool: True Fetched or Cache uptodate, False to indicate an issue
@ -585,7 +600,7 @@ def refresh_db(**kwargs):
# Clear minion repo-ng cache see #35342 discussion
log.info('Removing all *.sls files under \'%s\'', repo_details.local_dest)
failed = []
for root, _, files in os.walk(repo_details.local_dest, followlinks=False):
for root, _, files in salt.utils.path.os_walk(repo_details.local_dest, followlinks=False):
for name in files:
if name.endswith('.sls'):
full_filename = os.path.join(root, name)
@ -700,8 +715,8 @@ def genrepo(**kwargs):

verbose (bool):
Return verbose data structure which includes 'success_list', a list
of all sls files and the package names contained within. Default
'False'
of all sls files and the package names contained within.
Default ``False``.

failhard (bool):
If ``True``, an error will be raised if any repo SLS files failed
@ -730,7 +745,7 @@ def genrepo(**kwargs):
ret['errors'] = {}
repo_details = _get_repo_details(saltenv)

for root, _, files in os.walk(repo_details.local_dest, followlinks=False):
for root, _, files in salt.utils.path.os_walk(repo_details.local_dest, followlinks=False):
short_path = os.path.relpath(root, repo_details.local_dest)
if short_path == '.':
short_path = ''
@ -744,11 +759,13 @@ def genrepo(**kwargs):
successful_verbose
)
serial = salt.payload.Serial(__opts__)
# TODO: 2016.11 has PY2 mode as 'w+b' develop has 'w+' ? PY3 is 'wb+'
# also the reading of this is 'rb' in get_repo_data()
mode = 'w+' if six.PY2 else 'wb+'

with salt.utils.files.fopen(repo_details.winrepo_file, mode) as repo_cache:
repo_cache.write(serial.dumps(ret))
# save reading it back again. ! this breaks due to utf8 issues
#__context__['winrepo.data'] = ret
# For some reason we can not save ret into __context__['winrepo.data'] as this breaks due to utf8 issues
successful_count = len(successful_verbose)
error_count = len(ret['errors'])
if verbose:
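The ``mode = 'w+' if six.PY2 else 'wb+'`` line papers over a text/bytes split: serializers emit ``str`` (bytes) on Python 2 but ``bytes`` on Python 3, so the cache file must be opened accordingly. A self-contained illustration with the standard library's pickle standing in for ``salt.payload`` (an assumption made only to keep the sketch runnable):

.. code-block:: python

    import pickle
    import sys

    # Serialized payloads are bytes on Python 3, so the cache file must be
    # opened in binary mode there; older Python 2 code used text mode.
    mode = 'w+' if sys.version_info[0] == 2 else 'wb+'

    data = {'repo': {'7zip': {'18.05': {'full_name': '7-Zip'}}}}
    with open('winrepo.p', mode) as repo_cache:
        repo_cache.write(pickle.dumps(data))

    with open('winrepo.p', 'rb') as repo_cache:  # reads are binary either way
        assert pickle.loads(repo_cache.read()) == data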
@ -783,7 +800,7 @@ def genrepo(**kwargs):
return results


def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
renderers = salt.loader.render(__opts__, __salt__)

def _failed_compile(msg):
@ -793,7 +810,7 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):

try:
config = salt.template.compile_template(
file,
filename,
renderers,
__opts__['renderer'],
__opts__.get('renderer_blacklist', ''),
@ -808,7 +825,6 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
if config:
revmap = {}
errors = []
pkgname_ok_list = []
for pkgname, versions in six.iteritems(config):
if pkgname in ret['repo']:
log.error(
@ -817,12 +833,12 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
)
errors.append('package \'{0}\' already defined'.format(pkgname))
break
for version, repodata in six.iteritems(versions):
for version_str, repodata in six.iteritems(versions):
# Ensure version is a string/unicode
if not isinstance(version, six.string_types):
if not isinstance(version_str, six.string_types):
msg = (
'package \'{0}\'{{0}}, version number {1} '
'is not a string'.format(pkgname, version)
'is not a string'.format(pkgname, version_str)
)
log.error(
msg.format(' within \'{0}\''.format(short_path_name))
@ -834,7 +850,7 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
msg = (
'package \'{0}\'{{0}}, repo data for '
'version number {1} is not defined as a dictionary '
.format(pkgname, version)
.format(pkgname, version_str)
)
log.error(
msg.format(' within \'{0}\''.format(short_path_name))
@ -845,8 +861,6 @@ def _repo_process_pkg_sls(file, short_path_name, ret, successful_verbose):
if errors:
ret.setdefault('errors', {})[short_path_name] = errors
else:
if pkgname not in pkgname_ok_list:
pkgname_ok_list.append(pkgname)
ret.setdefault('repo', {}).update(config)
ret.setdefault('name_map', {}).update(revmap)
successful_verbose[short_path_name] = config.keys()
@ -921,7 +935,8 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
to install. (no spaces after the commas)

refresh (bool):
Boolean value representing whether or not to refresh the winrepo db
Boolean value representing whether or not to refresh the winrepo db.
Default ``False``.

pkgs (list):
A list of packages to install from a software repository. All
@ -1077,7 +1092,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
for pkg in pkg_params:
pkg_params[pkg] = {'version': pkg_params[pkg]}

if pkg_params is None or len(pkg_params) == 0:
if not pkg_params:
log.error('No package definition found')
return {}

@ -1108,22 +1123,20 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
ret[pkg_name] = 'Unable to locate package {0}'.format(pkg_name)
continue

# Get the version number passed or the latest available (must be a string)
version_num = ''
if options:
version_num = options.get('version', '')
# Using the salt cmdline with version=5.3 might be interpreted
# as a float it must be converted to a string in order for
# string matching to work.
if not isinstance(version_num, six.string_types) and version_num is not None:
version_num = str(version_num)
version_num = options.get('version', '')
# Using the salt cmdline with version=5.3 might be interpreted
# as a float it must be converted to a string in order for
# string matching to work.
if not isinstance(version_num, six.string_types) and version_num is not None:
version_num = str(version_num)

if not version_num:
# following can be version number or latest
version_num = _get_latest_pkg_version(pkginfo)

# Check if the version is already installed
if version_num in old.get(pkg_name, '').split(',') \
or (old.get(pkg_name) == 'Not Found'):
or (old.get(pkg_name, '') == 'Not Found'):
# Desired version number already installed
ret[pkg_name] = {'current': version_num}
continue
@ -1242,6 +1255,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
log.debug('Source hash matches package hash.')

# Get install flags

install_flags = pkginfo[version_num].get('install_flags', '')
if options and options.get('extra_install_flags'):
install_flags = '{0} {1}'.format(
@ -1249,43 +1263,32 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
options.get('extra_install_flags', '')
)

#Compute msiexec string
# Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[version_num].get('msiexec', False))

# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
if use_msiexec:
cmd = msiexec
arguments = ['/i', cached_pkg]
arguments = '"{0}" /I "{1}"'.format(msiexec, cached_pkg)
if pkginfo[version_num].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(salt.utils.args.shlex_split(install_flags, posix=False))
arguments = '{0} ALLUSERS=1'.format(arguments)
else:
cmd = cached_pkg
arguments = salt.utils.args.shlex_split(install_flags, posix=False)
arguments = '"{0}"'.format(cached_pkg)

if install_flags:
arguments = '{0} {1}'.format(arguments, install_flags)

# Install the software
# Check Use Scheduler Option
if pkginfo[version_num].get('use_scheduler', False):

# Build Scheduled Task Parameters
if use_msiexec:
cmd = msiexec
arguments = ['/i', cached_pkg]
if pkginfo['version_num'].get('allusers', True):
arguments.append('ALLUSERS="1"')
arguments.extend(salt.utils.args.shlex_split(install_flags))
else:
cmd = cached_pkg
arguments = salt.utils.args.shlex_split(install_flags)

# Create Scheduled Task
__salt__['task.create_task'](name='update-salt-software',
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=' '.join(arguments),
cmd=cmd_shell,
arguments='/s /c "{0}"'.format(arguments),
start_in=cache_path,
trigger_type='Once',
start_date='1975-01-01',
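The rewritten install path stops passing an argv list and instead hands one quoted string to ``cmd.exe /s /c``, both for the scheduled task and (below) for the direct ``cmd.run_all`` call. A standalone sketch of that string construction under the same assumptions the diff makes (``%ComSpec%`` resolves to cmd.exe, and paths may contain spaces, hence the quoting); the function name and sample paths are illustrative:

.. code-block:: python

    import os


    def build_install_command(cached_pkg, install_flags='', msiexec=None):
        # cmd.exe /s /c "<command>" strips the outer quotes and runs
        # <command>, which lets both the executable path and its
        # arguments carry their own quoting.
        cmd_shell = os.getenv(
            'ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))
        if msiexec:
            arguments = '"{0}" /I "{1}"'.format(msiexec, cached_pkg)
        else:
            arguments = '"{0}"'.format(cached_pkg)
        if install_flags:
            arguments = '{0} {1}'.format(arguments, install_flags)
        return '"{0}" /s /c "{1}"'.format(cmd_shell, arguments)


    print(build_install_command(r'C:\cache\7z1805-x64.msi',
                                install_flags='/qn /norestart',
                                msiexec=r'C:\Windows\System32\msiexec.exe'))

The uninstall path in ``remove()`` below builds its command the same way, with ``/X`` in place of ``/I``.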
@ -1327,14 +1330,10 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
log.error('Scheduled Task failed to run')
ret[pkg_name] = {'install status': 'failed'}
else:

# Combine cmd and arguments
cmd = [cmd]
cmd.extend(arguments)

# Launch the command
result = __salt__['cmd.run_all'](cmd,
result = __salt__['cmd.run_all']('"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
cache_path,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:
@ -1413,14 +1412,17 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
.. versionadded:: 0.16.0

Args:
name (str): The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimted list of packages, no spaces.
name (str):
The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.

version (str):
The version of the package to be uninstalled. If this option is
used to to uninstall multiple packages, then this version will be
applied to all targeted packages. Recommended using only when
uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled.

pkgs (list):
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
@ -1510,7 +1512,6 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
removal_targets.append(version_num)

for target in removal_targets:

# Get the uninstaller
uninstaller = pkginfo[target].get('uninstaller', '')
cache_dir = pkginfo[target].get('cache_dir', False)
@ -1535,6 +1536,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
# If true, the entire directory will be cached instead of the
# individual file. This is useful for installations that are not
# single files

if cache_dir and uninstaller.startswith('salt:'):
path, _ = os.path.split(uninstaller)
__salt__['cp.cache_dir'](path,
@ -1557,6 +1559,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):

# Compare the hash of the cached installer to the source only if
# the file is hosted on salt:
# TODO cp.cache_file does cache and hash checking? So why do it again?
if uninstaller.startswith('salt:'):
if __salt__['cp.hash_file'](uninstaller, saltenv) != \
__salt__['cp.hash_file'](cached_pkg):
@ -1574,14 +1577,13 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
else:
# Run the uninstaller directly
# (not hosted on salt:, https:, etc.)
cached_pkg = uninstaller
cached_pkg = os.path.expandvars(uninstaller)

# Fix non-windows slashes
cached_pkg = cached_pkg.replace('/', '\\')
cache_path, _ = os.path.split(cached_pkg)

# Get parameters for cmd
expanded_cached_pkg = str(os.path.expandvars(cached_pkg))
# os.path.expandvars is not required as we run everything through cmd.exe /s /c

# Get uninstall flags
uninstall_flags = pkginfo[target].get('uninstall_flags', '')
@ -1590,18 +1592,21 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
uninstall_flags = '{0} {1}'.format(
uninstall_flags, kwargs.get('extra_uninstall_flags', ''))

#Compute msiexec string
# Compute msiexec string
use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))

# Build cmd and arguments
# cmd and arguments must be separated for use with the task scheduler
if use_msiexec:
cmd = msiexec
arguments = ['/x']
arguments.extend(salt.utils.args.shlex_split(uninstall_flags, posix=False))
# Check if uninstaller is set to {guid}, if not we assume its a remote msi file.
# which has already been downloaded.
arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg)
else:
cmd = expanded_cached_pkg
arguments = salt.utils.args.shlex_split(uninstall_flags, posix=False)
arguments = '"{0}"'.format(cached_pkg)

if uninstall_flags:
arguments = '{0} {1}'.format(arguments, uninstall_flags)

# Uninstall the software
# Check Use Scheduler Option
@ -1611,8 +1616,8 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
user_name='System',
force=True,
action_type='Execute',
cmd=cmd,
arguments=' '.join(arguments),
cmd=cmd_shell,
arguments='/s /c "{0}"'.format(arguments),
start_in=cache_path,
trigger_type='Once',
start_date='1975-01-01',
@ -1625,13 +1630,10 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
log.error('Scheduled Task failed to run')
ret[pkgname] = {'uninstall status': 'failed'}
else:
# Build the install command
cmd = [cmd]
cmd.extend(arguments)

# Launch the command
result = __salt__['cmd.run_all'](
cmd,
'"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
output_loglevel='trace',
python_shell=False,
redirect_stderr=True)
if not result['retcode']:
@ -1677,11 +1679,13 @@ def purge(name=None, pkgs=None, version=None, **kwargs):

name (str): The name of the package to be deleted.

version (str): The version of the package to be deleted. If this option
is used in combination with the ``pkgs`` option below, then this
version (str):
The version of the package to be deleted. If this option is
used in combination with the ``pkgs`` option below, then this
version will be applied to all targeted packages.

pkgs (list): A list of packages to delete. Must be passed as a python
pkgs (list):
A list of packages to delete. Must be passed as a python
list. The ``name`` parameter will be ignored if this option is
passed.

@ -1815,4 +1819,20 @@ def compare_versions(ver1='', oper='==', ver2=''):

salt '*' pkg.compare_versions 1.2 >= 1.3
'''
return salt.utils.versions.compare(ver1, oper, ver2)
if not ver1:
raise SaltInvocationError('compare_version, ver1 is blank')
if not ver2:
raise SaltInvocationError('compare_version, ver2 is blank')

# Support version being the special meaning of 'latest'
if ver1 == 'latest':
ver1 = str(sys.maxsize)
if ver2 == 'latest':
ver2 = str(sys.maxsize)
# Support version being the special meaning of 'Not Found'
if ver1 == 'Not Found':
ver1 = '0.0.0.0.0'
if ver2 == 'Not Found':
ver2 = '0.0.0.0.0'

return salt.utils.versions.compare(ver1, oper, ver2, ignore_epoch=True)
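The expanded ``compare_versions`` maps the winrepo sentinels onto orderable values before delegating to the generic comparator: ``'latest'`` becomes the largest representable integer and ``'Not Found'`` a floor version. A sketch of that normalization, with a naive dotted-integer comparison standing in for ``salt.utils.versions.compare`` (an assumption; the real comparator also handles non-numeric parts and ``ignore_epoch``):

.. code-block:: python

    import sys


    def _normalize(ver):
        # 'latest' always wins; 'Not Found' always loses.
        if ver == 'latest':
            return str(sys.maxsize)
        if ver == 'Not Found':
            return '0.0.0.0.0'
        return ver


    def compare_versions_stub(ver1, oper, ver2):
        v1 = tuple(int(p) for p in _normalize(ver1).split('.'))
        v2 = tuple(int(p) for p in _normalize(ver2).split('.'))
        ops = {'<': v1 < v2, '<=': v1 <= v2, '==': v1 == v2,
               '!=': v1 != v2, '>=': v1 >= v2, '>': v1 > v2}
        return ops[oper]


    print(compare_versions_stub('latest', '>', '19.00'))   # True
    print(compare_versions_stub('Not Found', '<', '1.0'))  # True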
|
@ -48,6 +48,7 @@ import salt.utils.files
import salt.utils.functools
import salt.utils.itertools
import salt.utils.lazy
import salt.utils.path
import salt.utils.pkg
import salt.utils.pkg.rpm
import salt.utils.systemd
@ -996,7 +997,7 @@ def list_downloaded():
CACHE_DIR = os.path.join('/var/cache/', _yum())

ret = {}
for root, dirnames, filenames in os.walk(CACHE_DIR):
for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
for filename in fnmatch.filter(filenames, '*.rpm'):
package_path = os.path.join(root, filename)
pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path)
Some files were not shown because too many files have changed in this diff.