Merge pull request #37368 from terminalmage/issue34101

Overhaul archive.extracted state
This commit is contained in:
Mike Place 2016-11-02 20:45:05 +13:00 committed by GitHub
commit 421cfa6e66
9 changed files with 1757 additions and 505 deletions

View File

@ -136,6 +136,62 @@ This has now been corrected. While this is technically a bugfix, we decided to
hold a change in top file merging until a feature release to minimize user
impact.
Improved Archive Extraction Support
===================================
The :py:func:`archive.extracted <salt.states.archive.extracted>` state has been
overhauled. Notable changes include the following:
- When enforcing ownership (with the ``user`` and/or ``group`` arguments), the
``if_missing`` argument no longer has any connection to which path(s) have
ownership enforced. Instead, the paths are determined using either the
newly-added :py:func:`archive.list <salt.modules.archive.list_>` function, or
the newly-added ``enforce_ownership_on`` argument.
- ``if_missing`` also is no longer required to skip extraction, as Salt is now
able to tell which paths would be present if the archive were extracted. It
should, in most cases, only be necessary in cases where a semaphore file is
used to conditionally skip extraction of the archive.
- Password-protected ZIP archives are now detected before extraction, and the
state fails without attempting to extract the archive if no password was
specified.
- By default, a single top-level directory is enforced, to guard against
'tar-bombs'. This enforcement can be disabled by setting ``enforce_toplevel``
to ``False``.
- The ``tar_options`` and ``zip_options`` arguments have been deprecated in
favor of a single ``options`` argument.
- The ``archive_format`` argument is now optional. The ending of the ``source``
argument is used to guess whether it is a tar, zip or rar file. If the
``archive_format`` cannot be guessed, then it will need to be specified, but
in many cases it can now be omitted.
- Ownership enforcement is now performed irrespective of whether or not the
archive needed to be extracted. This means that the state can be re-run after
the archive has been fully extracted to repair changes to ownership.
A number of new arguments were also added. See the :py:func:`docs for the
archive.extracted state <salt.states.archive.extracted>` for more information.
Additionally, the following changes have been made to the :mod:`archive
<salt.modules.archive>` execution module:
- A new function (:py:func:`archive.list <salt.modules.archive.list_>`) has
been added. This function lists the files/directories in an archive file, and
supports a ``verbose`` argument that gives a more detailed breakdown of which
paths are files, which are directories, and which paths are at the top level
of the archive.
- A new function (:py:func:`archive.is_encrypted
<salt.modules.archive.is_encrypted>`) has been added. This function will
return ``True`` if the archive is a password-protected ZIP file, ``False`` if
not. If the archive is not a ZIP file, an error will be raised.
- :py:func:`archive.cmd_unzip <salt.modules.archive.cmd_unzip>` now supports
passing a password, bringing it to feature parity with
:py:func:`archive.unzip <salt.modules.archive.unzip>`. Note that this is
still not considered to be secure, and :py:func:`archive.unzip
<salt.modules.archive.unzip>` is recommended for dealing with
password-protected ZIP archives.
- The default value for the ``extract_perms`` argument to
:py:func:`archive.unzip <salt.modules.archive.unzip>` has been changed to
``True``.
Config Changes
==============

View File

@ -5,39 +5,38 @@ A module to wrap (non-Windows) archive calls
.. versionadded:: 2014.1.0
'''
from __future__ import absolute_import
import os
import logging
import contextlib # For < 2.7 compat
import errno
import logging
import os
import re
import shlex
import tarfile
import zipfile
try:
from shlex import quote as _quote # pylint: disable=E0611
except ImportError:
from pipes import quote as _quote
# Import salt libs
from salt.exceptions import SaltInvocationError, CommandExecutionError
from salt.ext.six import string_types, integer_types
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module
import salt.utils
import salt.utils.itertools
# TODO: Check that the passed arguments are correct
# Don't shadow built-in's.
__func_alias__ = {
'zip_': 'zip'
'zip_': 'zip',
'list_': 'list'
}
log = logging.getLogger(__name__)
HAS_ZIPFILE = False
try:
import zipfile
HAS_ZIPFILE = True
except ImportError:
pass
log = logging.getLogger(__name__)
def __virtual__():
if salt.utils.is_windows():
return HAS_ZIPFILE
commands = ('tar', 'gzip', 'gunzip', 'zip', 'unzip', 'rar', 'unrar')
# If none of the above commands are in $PATH this module is a no-go
if not any(salt.utils.which(cmd) for cmd in commands):
@ -45,6 +44,231 @@ def __virtual__():
return True
def list_(name,
          archive_format=None,
          options=None,
          clean=False,
          verbose=False,
          saltenv='base'):
    '''
    .. versionadded:: 2016.11.0

    List the files and directories in a tar, zip, or rar archive.

    .. note::
        This function will only provide results for XZ-compressed archives if
        xz-utils_ is installed, as Python does not at this time natively
        support XZ compression in its tarfile_ module.

    name
        Path/URL of archive

    archive_format
        Specify the format of the archive (``tar``, ``zip``, or ``rar``). If
        this argument is omitted, the archive format will be guessed based on
        the value of the ``name`` parameter.

    options
        **For tar archives only.** This function will, by default, try to use
        the tarfile_ module from the Python standard library to get a list of
        files/directories. If this method fails, then it will fall back to
        using the shell to decompress the archive to stdout and pipe the
        results to ``tar -tf -`` to produce a list of filenames. XZ-compressed
        archives are already supported automatically, but in the event that
        the tar archive uses a different sort of compression not supported
        natively by tarfile_, this option can be used to specify a command
        that will decompress the archive to stdout. For example:

        .. code-block:: bash

            salt minion_id archive.list /path/to/foo.tar.gz options='gzip --decompress --stdout'

        .. note::
            It is not necessary to manually specify options for gzip'ed
            archives, as gzip compression is natively supported by tarfile_.

    clean : False
        Set this value to ``True`` to delete the path referred to by ``name``
        once the contents have been listed. This option should be used with
        care.

        .. note::
            If there is an error listing the archive's contents, the cached
            file will not be removed, to allow for troubleshooting.

    verbose : False
        If ``False``, this function will return a list of files/dirs in the
        archive. If ``True``, it will return a dictionary categorizing the
        paths into separate keys containing the directory names, file names,
        and also directories/files present in the top level of the archive.

    saltenv : base
        Specifies the fileserver environment from which to retrieve
        ``archive``. This is only applicable when ``archive`` is a file from
        the ``salt://`` fileserver.

    .. _tarfile: https://docs.python.org/2/library/tarfile.html
    .. _xz-utils: http://tukaani.org/xz/

    CLI Examples:

    .. code-block:: bash

            salt '*' archive.list /path/to/myfile.tar.gz
            salt '*' archive.list salt://foo.tar.gz
            salt '*' archive.list https://domain.tld/myfile.zip
            salt '*' archive.list ftp://10.1.2.3/foo.rar
    '''
    def _list_tar(name, cached, decompress_cmd):
        '''
        List the contents of a tar archive, preferring the stdlib tarfile
        module and falling back to shelling out (decompress to stdout, pipe
        to ``tar tf -``) for compression formats tarfile cannot read.
        '''
        try:
            with contextlib.closing(tarfile.open(cached)) as tar_archive:
                # Directories are suffixed with a slash so the verbose
                # categorization below can distinguish them from files.
                return [
                    x.name + '/' if x.isdir() else x.name
                    for x in tar_archive.getmembers()
                ]
        except tarfile.ReadError:
            # tarfile could not read the archive (e.g. compression not
            # natively supported, such as XZ). Fall back to the shell.
            if not salt.utils.which('tar'):
                raise CommandExecutionError('\'tar\' command not available')
            if decompress_cmd is not None:
                # Guard against shell injection
                try:
                    decompress_cmd = ' '.join(
                        [_quote(x) for x in shlex.split(decompress_cmd)]
                    )
                except AttributeError:
                    raise CommandExecutionError('Invalid CLI options')
            else:
                # No explicit decompression command was passed. Try XZ if
                # xz-utils is installed and recognizes the cached file.
                if salt.utils.which('xz') \
                        and __salt__['cmd.retcode'](['xz', '-l', cached],
                                                    python_shell=False,
                                                    ignore_retcode=True) == 0:
                    decompress_cmd = 'xz --decompress --stdout'

            if decompress_cmd:
                cmd = '{0} {1} | tar tf -'.format(decompress_cmd,
                                                  _quote(cached))
                result = __salt__['cmd.run_all'](cmd, python_shell=True)
                if result['retcode'] != 0:
                    raise CommandExecutionError(
                        'Failed to decompress {0}'.format(name),
                        info={'error': result['stderr']}
                    )
                ret = []
                for line in salt.utils.itertools.split(result['stdout'], '\n'):
                    line = line.strip()
                    if line:
                        ret.append(line)
                return ret

        raise CommandExecutionError(
            'Unable to list contents of {0}. If this is an XZ-compressed tar '
            'archive, install xz-utils to enable listing its contents. If it '
            'is compressed using something other than XZ, it may be necessary '
            'to specify CLI options to decompress the archive. See the '
            'documentation for details.'.format(name)
        )

    def _list_zip(name, cached):
        '''
        List the contents of a ZIP archive using the stdlib zipfile module.
        '''
        # Password-protected ZIP archives can still be listed by zipfile, so
        # there is no reason to invoke the unzip command.
        try:
            with contextlib.closing(zipfile.ZipFile(cached)) as zip_archive:
                return zip_archive.namelist()
        except zipfile.BadZipfile:
            raise CommandExecutionError('{0} is not a ZIP file'.format(name))

    def _list_rar(name, cached):
        '''
        List the contents of a RAR archive by parsing ``rar lt`` output.
        '''
        # Bugfix: list the locally-cached copy of the archive ('cached')
        # rather than the path parsed from the URL ('path'), which does not
        # exist on disk for remote (http/ftp/salt) sources.
        output = __salt__['cmd.run'](
            ['rar', 'lt', cached],
            python_shell=False,
            ignore_retcode=False)
        matches = re.findall(r'Name:\s*([^\n]+)\s*Type:\s*([^\n]+)', output)
        ret = [x + '/' if y == 'Directory' else x for x, y in matches]
        if not ret:
            raise CommandExecutionError(
                'Failed to decompress {0}'.format(name),
                info={'error': output}
            )
        return ret

    cached = __salt__['cp.cache_file'](name, saltenv)
    if not cached:
        raise CommandExecutionError('Failed to cache {0}'.format(name))

    try:
        parsed = _urlparse(name)
        path = parsed.path or parsed.netloc

        def _unsupported_format(archive_format):
            '''
            Raise the appropriate error for a missing/unsupported format.
            '''
            if archive_format is None:
                raise CommandExecutionError(
                    'Unable to guess archive format, please pass an '
                    '\'archive_format\' argument.'
                )
            raise CommandExecutionError(
                'Unsupported archive format \'{0}\''.format(archive_format)
            )

        if not archive_format:
            guessed_format = salt.utils.files.guess_archive_type(path)
            if guessed_format is None:
                _unsupported_format(archive_format)
            archive_format = guessed_format

        # Dispatch to the matching _list_* helper defined above
        func = locals().get('_list_' + archive_format)
        if not hasattr(func, '__call__'):
            _unsupported_format(archive_format)

        # Only the tar helper takes the 'options' argument
        args = (options,) if archive_format == 'tar' else ()
        try:
            ret = func(name, cached, *args)
        except (IOError, OSError) as exc:
            raise CommandExecutionError(
                'Failed to list contents of {0}: {1}'.format(
                    name, exc.__str__()
                )
            )
        except CommandExecutionError:
            # Re-raised below with the cache location added to the error info
            raise
        except Exception as exc:
            raise CommandExecutionError(
                'Uncaught exception \'{0}\' when listing contents of {1}'
                .format(exc, name)
            )

        if clean:
            try:
                os.remove(cached)
                log.debug('Cleaned cached archive %s', cached)
            except OSError as exc:
                # Already-removed is fine; anything else is worth a warning
                if exc.errno != errno.ENOENT:
                    log.warning(
                        'Failed to clean cached archive %s: %s',
                        cached, exc.__str__()
                    )

        if verbose:
            verbose_ret = {'dirs': [],
                           'files': [],
                           'top_level_dirs': [],
                           'top_level_files': []}
            for item in ret:
                if item.endswith('/'):
                    verbose_ret['dirs'].append(item)
                    if item.count('/') == 1:
                        verbose_ret['top_level_dirs'].append(item)
                else:
                    verbose_ret['files'].append(item)
                    if item.count('/') == 0:
                        verbose_ret['top_level_files'].append(item)
            ret = verbose_ret
        return ret
    except CommandExecutionError as exc:
        # Reraise with cache path in the error so that the user can examine
        # the cached archive for troubleshooting purposes.
        info = exc.info or {}
        info['archive location'] = cached
        raise CommandExecutionError(exc.error, info=info)
@salt.utils.decorators.which('tar')
def tar(options, tarfile, sources=None, dest=None,
cwd=None, template=None, runas=None):
@ -419,8 +643,14 @@ def zip_(zip_file, sources, template=None, cwd=None, runas=None):
@salt.utils.decorators.which('unzip')
def cmd_unzip(zip_file, dest, excludes=None,
template=None, options=None, runas=None, trim_output=False):
def cmd_unzip(zip_file,
dest,
excludes=None,
options=None,
template=None,
runas=None,
trim_output=False,
password=None):
'''
.. versionadded:: 2015.5.0
In versions 2014.7.x and earlier, this function was known as
@ -447,7 +677,7 @@ def cmd_unzip(zip_file, dest, excludes=None,
.. code-block:: bash
salt '*' archive.cmd_unzip template=jinja /tmp/zipfile.zip /tmp/{{grains.id}}/ excludes=file_1,file_2
salt '*' archive.cmd_unzip template=jinja /tmp/zipfile.zip '/tmp/{{grains.id}}' excludes=file_1,file_2
options
Optional when using ``zip`` archives, ignored when using other archives
@ -466,6 +696,23 @@ def cmd_unzip(zip_file, dest, excludes=None,
The number of files we should output on success before the rest are trimmed, if this is
set to True then it will default to 100
password
Password to use with password protected zip files
.. note::
This is not considered secure. It is recommended to instead use
:py:func:`archive.unzip <salt.modules.archive.unzip>` for
password-protected ZIP files. If a password is used here, then the
unzip command run to extract the ZIP file will not show up in the
minion log like most shell commands Salt runs do. However, the
password will still be present in the events logged to the minion
log at the ``debug`` log level. If the minion is logging at
``debug`` (or more verbose), then be advised that the password will
appear in the log.
.. versionadded:: 2016.11.0
CLI Example:
.. code-block:: bash
@ -478,6 +725,8 @@ def cmd_unzip(zip_file, dest, excludes=None,
excludes = [str(excludes)]
cmd = ['unzip']
if password:
cmd.extend(['-P', password])
if options:
cmd.append('{0}'.format(options))
cmd.extend(['{0}'.format(zip_file), '-d', '{0}'.format(dest)])
@ -485,16 +734,30 @@ def cmd_unzip(zip_file, dest, excludes=None,
if excludes is not None:
cmd.append('-x')
cmd.extend(excludes)
files = __salt__['cmd.run'](cmd,
template=template,
runas=runas,
python_shell=False).splitlines()
return _trim_files(files, trim_output)
result = __salt__['cmd.run_all'](
cmd,
template=template,
runas=runas,
python_shell=False,
redirect_stderr=True,
output_loglevel='quiet' if password else 'debug')
if result['retcode'] != 0:
raise CommandExecutionError(result['stdout'])
return _trim_files(result['stdout'].splitlines(), trim_output)
def unzip(zip_file, dest, excludes=None, options=None, template=None,
runas=None, trim_output=False, password=None, extract_perms=False):
def unzip(zip_file,
dest,
excludes=None,
options=None,
template=None,
runas=None,
trim_output=False,
password=None,
extract_perms=True):
'''
Uses the ``zipfile`` Python module to unpack zip files
@ -543,33 +806,37 @@ def unzip(zip_file, dest, excludes=None, options=None, template=None,
salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
password: None
password
Password to use with password protected zip files
.. note::
The password will be present in the events logged to the minion log
file at the ``debug`` log level. If the minion is logging at
``debug`` (or more verbose), then be advised that the password will
appear in the log.
.. versionadded:: 2016.3.0
extract_perms: False
The python zipfile module does not extract file/directory attributes by default.
Setting this flag will attempt to apply the file permision attributes to the
extracted files/folders.
extract_perms : True
The Python zipfile_ module does not extract file/directory attributes
by default. When this argument is set to ``True``, Salt will attempt to
apply the file permission attributes to the extracted files/folders.
On Windows, only the read-only flag will be extracted as set within the zip file,
other attributes (i.e. user/group permissions) are ignored.
On Windows, only the read-only flag will be extracted as set within the
zip file, other attributes (i.e. user/group permissions) are ignored.
Set this argument to ``False`` to disable this behavior.
.. versionadded:: 2016.11.0
.. _zipfile: https://docs.python.org/2/library/zipfile.html
CLI Example:
.. code-block:: bash
salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ password='BadPassword'
'''
# https://bugs.python.org/issue15795
log.warning('Due to bug 15795 in python\'s zip lib, the permissions of the'
' extracted files may not be preserved when using archive.unzip')
log.warning('To preserve the permissions of extracted files, use'
' archive.cmd_unzip')
if not excludes:
excludes = []
if runas:
@ -633,6 +900,65 @@ def unzip(zip_file, dest, excludes=None, options=None, template=None,
return _trim_files(cleaned_files, trim_output)
def is_encrypted(name, clean=False, saltenv='base'):
    '''
    .. versionadded:: 2016.11.0

    Returns ``True`` if the zip archive is password-protected, ``False`` if
    not. If the specified file is not a ZIP archive, an error will be raised.

    clean : False
        Set this value to ``True`` to delete the path referred to by ``name``
        once the contents have been listed. This option should be used with
        care.

        .. note::
            If there is an error listing the archive's contents, the cached
            file will not be removed, to allow for troubleshooting.

    CLI Examples:

    .. code-block:: bash

            salt '*' archive.is_encrypted /path/to/myfile.zip
            salt '*' archive.is_encrypted salt://foo.zip
            salt '*' archive.is_encrypted https://domain.tld/myfile.zip clean=True
            salt '*' archive.is_encrypted ftp://10.1.2.3/foo.zip
    '''
    cached = __salt__['cp.cache_file'](name, saltenv)
    if not cached:
        raise CommandExecutionError('Failed to cache {0}'.format(name))

    archive_info = {'archive location': cached}

    # zipfile raises RuntimeError from testzip() when it encounters an
    # encrypted member; that is how password protection is detected here.
    encrypted = False
    try:
        with contextlib.closing(zipfile.ZipFile(cached)) as zip_archive:
            zip_archive.testzip()
    except RuntimeError:
        encrypted = True
    except zipfile.BadZipfile:
        raise CommandExecutionError(
            '{0} is not a ZIP file'.format(name),
            info=archive_info
        )
    except Exception as exc:
        # Include the cache location so the file can be inspected
        raise CommandExecutionError(str(exc), info=archive_info)

    if clean:
        try:
            os.remove(cached)
            log.debug('Cleaned cached archive %s', cached)
        except OSError as exc:
            # A missing file is fine; anything else deserves a warning
            if exc.errno != errno.ENOENT:
                log.warning(
                    'Failed to clean cached archive %s: %s',
                    cached, str(exc)
                )

    return encrypted
@salt.utils.decorators.which('rar')
def rar(rarfile, sources, template=None, cwd=None, runas=None):
'''

View File

@ -23,6 +23,7 @@ import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
@ -537,6 +538,101 @@ def get_hash(path, form='sha256', chunk_size=65536):
return salt.utils.get_hash(os.path.expanduser(path), form, chunk_size)
def get_source_sum(source, source_hash, saltenv='base'):
    '''
    .. versionadded:: 2016.11.0

    Obtain a checksum and hash type, given a ``source_hash`` file/expression
    and the source file name.

    source
        Source file, as used in :py:mod:`file <salt.states.file>` and other
        states. If ``source_hash`` refers to a file containing hashes, then
        this filename will be used to match a filename in that file. If the
        ``source_hash`` is a hash expression, then this argument will be
        ignored.

    source_hash
        Hash file/expression, as used in :py:mod:`file <salt.states.file>` and
        other states. If this value refers to a remote URL or absolute path to
        a local file, it will be cached and :py:func:`file.extract_hash
        <salt.modules.file.extract_hash>` will be used to obtain a hash from
        it.

    saltenv : base
        Salt fileserver environment from which to retrieve the source_hash.
        This value will only be used when ``source_hash`` refers to a file on
        the Salt fileserver (i.e. one beginning with ``salt://``).

    Returns a dict with ``hash_type`` and ``hsum`` keys. Raises
    ``CommandExecutionError`` when ``source_hash`` is malformed or a
    referenced hash file cannot be found or parsed.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.get_source_sum /etc/foo.conf source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6
        salt '*' file.get_source_sum /etc/foo.conf source_hash=md5=499ae16dcae71eeb7c3a30c75ea7a1a6
        salt '*' file.get_source_sum /etc/foo.conf source_hash=https://foo.domain.tld/hashfile
    '''
    def _invalid_source_hash_format():
        '''
        DRY helper for reporting invalid source_hash input
        '''
        raise CommandExecutionError(
            'Source hash {0} format is invalid. It must be in the format '
            '<hash type>=<hash>, or it must be a supported protocol: {1}'
            .format(source_hash, ', '.join(salt.utils.files.VALID_PROTOS))
        )

    hash_fn = None
    # An absolute path points directly at a local hash file
    if os.path.isabs(source_hash):
        hash_fn = source_hash
    else:
        try:
            proto = _urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                # source_hash is a URL to a hash file; cache it locally so
                # extract_hash() below can read it
                hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
                if not hash_fn:
                    raise CommandExecutionError(
                        'Source hash file {0} not found'.format(source_hash)
                    )
            else:
                if proto != '':
                    # Some unsupported protocol (e.g. foo://) is being used.
                    # We'll get into this else block if a hash expression
                    # (like md5=<md5 checksum here>), but in those cases, the
                    # protocol will be an empty string, in which case we avoid
                    # this error condition.
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            # Non-string source_hash (e.g. None or an int) cannot be parsed
            _invalid_source_hash_format()

    if hash_fn is not None:
        # Pull the hash matching the source filename out of the hash file
        ret = extract_hash(hash_fn, '', source)
        if ret is None:
            _invalid_source_hash_format()
        return ret
    else:
        # The source_hash is a hash expression
        ret = {}
        try:
            # Expected form: '<hash_type>=<hash>'
            ret['hash_type'], ret['hsum'] = \
                [x.strip() for x in source_hash.split('=', 1)]
        except AttributeError:
            # source_hash is not a string
            _invalid_source_hash_format()
        except ValueError:
            # No hash type, try to figure out by hash length
            if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret['hsum'] = source_hash
            source_hash_len = len(source_hash)
            # HASHES: module-level (hash_type, hex-digest length) pairs --
            # presumably ordered; the first length match wins. TODO confirm.
            for hash_type, hash_len in HASHES:
                if source_hash_len == hash_len:
                    ret['hash_type'] = hash_type
                    break
            else:
                # Length matched no known hash type
                _invalid_source_hash_format()

        return ret
def check_hash(path, file_hash):
'''
Check if a file matches the given hash string
@ -3501,7 +3597,6 @@ def get_managed(
# Copy the file to the minion and templatize it
sfn = ''
source_sum = {}
remote_protos = ('http', 'https', 'ftp', 'swift', 's3')
def _get_local_file_source_sum(path):
'''
@ -3533,44 +3628,12 @@ def get_managed(
else:
if not skip_verify:
if source_hash:
protos = ('salt', 'file') + remote_protos
def _invalid_source_hash_format():
'''
DRY helper for reporting invalid source_hash input
'''
msg = (
'Source hash {0} format is invalid. It '
'must be in the format <hash type>=<hash>, '
'or it must be a supported protocol: {1}'
.format(source_hash, ', '.join(protos))
)
return '', {}, msg
try:
source_hash_scheme = _urlparse(source_hash).scheme
except TypeError:
return '', {}, ('Invalid format for source_hash '
'parameter')
if source_hash_scheme in protos:
# The source_hash is a file on a server
hash_fn = __salt__['cp.cache_file'](
source_hash, saltenv)
if not hash_fn:
return '', {}, ('Source hash file {0} not found'
.format(source_hash))
source_sum = extract_hash(
hash_fn, '', source_hash_name or name)
if source_sum is None:
return _invalid_source_hash_format()
else:
# The source_hash is a hash string
comps = source_hash.split('=', 1)
if len(comps) < 2:
return _invalid_source_hash_format()
source_sum['hsum'] = comps[1].strip()
source_sum['hash_type'] = comps[0].strip()
source_sum = get_source_sum(source_hash_name or source,
source_hash,
saltenv)
except CommandExecutionError as exc:
return '', {}, exc.strerror
else:
msg = (
'Unable to verify upstream hash of source file {0}, '
@ -3579,7 +3642,7 @@ def get_managed(
)
return '', {}, msg
if source and (template or parsed_scheme in remote_protos):
if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
# Check if we have the template or remote file cached
cached_dest = __salt__['cp.is_cached'](source, saltenv)
if cached_dest and (source_hash or skip_verify):
@ -3647,10 +3710,14 @@ def extract_hash(hash_fn, hash_type='sha256', file_name=''):
This routine is called from the :mod:`file.managed
<salt.states.file.managed>` state to pull a hash from a remote file.
Regular expressions are used line by line on the ``source_hash`` file, to
find a potential candidate of the indicated hash type. This avoids many
problems of arbitrary file lay out rules. It specifically permits pulling
find a potential candidate of the indicated hash type. This avoids many
problems of arbitrary file layout rules. It specifically permits pulling
hash codes from debian ``*.dsc`` files.
If no exact match of a hash and filename are found, then the first hash
found (if any) will be returned. If no hashes at all are found, then
``None`` will be returned.
For example:
.. code-block:: yaml
@ -3670,41 +3737,64 @@ def extract_hash(hash_fn, hash_type='sha256', file_name=''):
source_sum = None
partial_id = False
name_sought = os.path.basename(file_name)
log.debug('modules.file.py - extract_hash(): Extracting hash for file '
'named: {0}'.format(name_sought))
with salt.utils.fopen(hash_fn, 'r') as hash_fn_fopen:
for hash_variant in HASHES:
if hash_type == '' or hash_type == hash_variant[0]:
log.debug('modules.file.py - extract_hash(): Will use regex to get'
' a purely hexadecimal number of length ({0}), presumably hash'
' type : {1}'.format(hash_variant[1], hash_variant[0]))
hash_fn_fopen.seek(0)
for line in hash_fn_fopen.read().splitlines():
hash_array = re.findall(r'(?i)(?<![a-z0-9])[a-f0-9]{' + str(hash_variant[1]) + '}(?![a-z0-9])', line)
log.debug('modules.file.py - extract_hash(): From "line": {0} '
'got : {1}'.format(line, hash_array))
if hash_array:
if not partial_id:
source_sum = {'hsum': hash_array[0], 'hash_type': hash_variant[0]}
partial_id = True
log.debug(
'modules.file.py - extract_hash(): Extracting hash for file named: %s',
name_sought
)
try:
with salt.utils.fopen(hash_fn, 'r') as hash_fn_fopen:
for hash_variant in HASHES:
if hash_type == '' or hash_type == hash_variant[0]:
log.debug(
'modules.file.py - extract_hash(): Will use regex to '
'get a purely hexadecimal number of length (%s), '
'presumably hash type : %s',
hash_variant[1], hash_variant[0]
)
hash_fn_fopen.seek(0)
for line in hash_fn_fopen.read().splitlines():
hash_array = re.findall(
r'(?i)(?<![a-z0-9])[a-f0-9]{' + str(hash_variant[1]) + '}(?![a-z0-9])',
line)
log.debug(
'modules.file.py - extract_hash(): From "%s", '
'got : %s', line, hash_array
)
if hash_array:
if not partial_id:
source_sum = {'hsum': hash_array[0],
'hash_type': hash_variant[0]}
partial_id = True
log.debug('modules.file.py - extract_hash(): Found: {0} '
'-- {1}'.format(source_sum['hash_type'],
source_sum['hsum']))
log.debug(
'modules.file.py - extract_hash(): Found: %s '
'-- %s',
source_sum['hash_type'], source_sum['hsum']
)
if re.search(name_sought, line):
source_sum = {'hsum': hash_array[0], 'hash_type': hash_variant[0]}
log.debug('modules.file.py - extract_hash: For {0} -- '
'returning the {1} hash "{2}".'.format(
name_sought,
source_sum['hash_type'],
source_sum['hsum']))
return source_sum
if name_sought in line:
source_sum = {'hsum': hash_array[0],
'hash_type': hash_variant[0]}
log.debug(
'modules.file.py - extract_hash: For %s -- '
'returning the %s hash "%s".',
name_sought, source_sum['hash_type'],
source_sum['hsum']
)
return source_sum
except OSError as exc:
raise CommandExecutionError(
'Error encountered extracting hash from {0}: {1}'.format(
exc.filename, exc.strerror
)
)
if partial_id:
log.debug('modules.file.py - extract_hash: Returning the partially '
'identified {0} hash "{1}".'.format(
source_sum['hash_type'], source_sum['hsum']))
log.debug(
'modules.file.py - extract_hash: Returning the partially '
'identified %s hash "%s".',
source_sum['hash_type'], source_sum['hsum']
)
else:
log.debug('modules.file.py - extract_hash: Returning None.')
return source_sum
@ -4163,7 +4253,8 @@ def manage_file(name,
dir_mode=None,
follow_symlinks=True,
skip_verify=False,
keep_mode=False):
keep_mode=False,
**kwargs):
'''
Checks the destination against what was retrieved with get_managed and
makes the appropriate modifications (if necessary).

File diff suppressed because it is too large Load Diff

View File

@ -1474,10 +1474,10 @@ def managed(name,
``check_cmd``.
tmp_ext
provide extention for temp file created by check_cmd
useful for checkers dependant on config file extention
for example it should be useful for init-checkconf upstart config checker
by default it is empty
Suffix for temp file created by ``check_cmd``. Useful for checkers
dependent on config file extension (e.g. the init-checkconf upstart
config checker).
.. code-block:: yaml
/etc/init/test.conf:
@ -1791,7 +1791,7 @@ def managed(name,
tmp_filename = None
if check_cmd:
tmp_filename = salt.utils.mkstemp()+tmp_ext
tmp_filename = salt.utils.mkstemp(suffix=tmp_ext)
# if exists copy existing file to tmp to compare
if __salt__['file.file_exists'](name):
@ -1824,7 +1824,8 @@ def managed(name,
dir_mode,
follow_symlinks,
skip_verify,
keep_mode)
keep_mode,
**kwargs)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
@ -1882,7 +1883,8 @@ def managed(name,
dir_mode,
follow_symlinks,
skip_verify,
keep_mode)
keep_mode,
**kwargs)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())

View File

@ -22,6 +22,23 @@ from salt.ext import six
log = logging.getLogger(__name__)
TEMPFILE_PREFIX = '__salt.tmp.'
REMOTE_PROTOS = ('http', 'https', 'ftp', 'swift', 's3')
VALID_PROTOS = ('salt', 'file') + REMOTE_PROTOS
def guess_archive_type(name):
    '''
    Guess an archive type (tar, zip, or rar) by its file extension.

    Returns ``'tar'``, ``'zip'``, or ``'rar'`` (matched case-insensitively),
    or ``None`` if the extension is not recognized.
    '''
    name = name.lower()
    # str.endswith accepts a tuple of suffixes -- one call covers all of
    # the tar-family extensions instead of looping over them.
    tar_endings = ('.tar', '.tar.gz', '.tar.bz2', '.tar.xz', '.tgz',
                   '.tbz2', '.txz', '.tar.lzma', '.tlz')
    if name.endswith(tar_endings):
        return 'tar'
    if name.endswith('.zip'):
        return 'zip'
    if name.endswith('.rar'):
        return 'rar'
    return None
def recursive_copy(source, dest):

View File

@ -186,8 +186,19 @@ class ArchiveTestCase(TestCase):
@patch('salt.utils.which', lambda exe: exe)
def test_cmd_unzip(self):
mock = MagicMock(return_value='salt')
with patch.dict(archive.__salt__, {'cmd.run': mock}):
def _get_mock():
'''
Create a new MagicMock for each scenario in this test, so that
assert_called_once_with doesn't complain that the same mock object
is called more than once.
'''
return MagicMock(return_value={'stdout': 'salt',
'stderr': '',
'pid': 12345,
'retcode': 0})
mock = _get_mock()
with patch.dict(archive.__salt__, {'cmd.run_all': mock}):
ret = archive.cmd_unzip(
'/tmp/salt.{{grains.id}}.zip',
'/tmp/dest',
@ -198,11 +209,15 @@ class ArchiveTestCase(TestCase):
mock.assert_called_once_with(
['unzip', '/tmp/salt.{{grains.id}}.zip', '-d', '/tmp/dest',
'-x', '/tmp/tmpePe8yO', '/tmp/tmpLeSw1A'],
runas=None, python_shell=False, template='jinja'
output_loglevel='debug',
python_shell=False,
redirect_stderr=True,
runas=None,
template='jinja'
)
mock = MagicMock(return_value='salt')
with patch.dict(archive.__salt__, {'cmd.run': mock}):
mock = _get_mock()
with patch.dict(archive.__salt__, {'cmd.run_all': mock}):
ret = archive.cmd_unzip(
'/tmp/salt.{{grains.id}}.zip',
'/tmp/dest',
@ -213,11 +228,15 @@ class ArchiveTestCase(TestCase):
mock.assert_called_once_with(
['unzip', '/tmp/salt.{{grains.id}}.zip', '-d', '/tmp/dest',
'-x', '/tmp/tmpePe8yO', '/tmp/tmpLeSw1A'],
runas=None, python_shell=False, template='jinja'
output_loglevel='debug',
python_shell=False,
redirect_stderr=True,
runas=None,
template='jinja'
)
mock = MagicMock(return_value='salt')
with patch.dict(archive.__salt__, {'cmd.run': mock}):
mock = _get_mock()
with patch.dict(archive.__salt__, {'cmd.run_all': mock}):
ret = archive.cmd_unzip(
'/tmp/salt.{{grains.id}}.zip',
'/tmp/dest',
@ -229,11 +248,15 @@ class ArchiveTestCase(TestCase):
mock.assert_called_once_with(
['unzip', '-fo', '/tmp/salt.{{grains.id}}.zip', '-d',
'/tmp/dest', '-x', '/tmp/tmpePe8yO', '/tmp/tmpLeSw1A'],
runas=None, python_shell=False, template='jinja'
output_loglevel='debug',
python_shell=False,
redirect_stderr=True,
runas=None,
template='jinja'
)
mock = MagicMock(return_value='salt')
with patch.dict(archive.__salt__, {'cmd.run': mock}):
mock = _get_mock()
with patch.dict(archive.__salt__, {'cmd.run_all': mock}):
ret = archive.cmd_unzip(
'/tmp/salt.{{grains.id}}.zip',
'/tmp/dest',
@ -245,7 +268,32 @@ class ArchiveTestCase(TestCase):
mock.assert_called_once_with(
['unzip', '-fo', '/tmp/salt.{{grains.id}}.zip', '-d',
'/tmp/dest', '-x', '/tmp/tmpePe8yO', '/tmp/tmpLeSw1A'],
runas=None, python_shell=False, template='jinja'
output_loglevel='debug',
python_shell=False,
redirect_stderr=True,
runas=None,
template='jinja'
)
mock = _get_mock()
with patch.dict(archive.__salt__, {'cmd.run_all': mock}):
ret = archive.cmd_unzip(
'/tmp/salt.{{grains.id}}.zip',
'/tmp/dest',
excludes=['/tmp/tmpePe8yO', '/tmp/tmpLeSw1A'],
template='jinja',
options='-fo',
password='asdf',
)
self.assertEqual(['salt'], ret)
mock.assert_called_once_with(
['unzip', '-P', 'asdf', '-fo', '/tmp/salt.{{grains.id}}.zip',
'-d', '/tmp/dest', '-x', '/tmp/tmpePe8yO', '/tmp/tmpLeSw1A'],
output_loglevel='quiet',
python_shell=False,
redirect_stderr=True,
runas=None,
template='jinja'
)
def test_unzip(self):
@@ -255,7 +303,8 @@ class ArchiveTestCase(TestCase):
'/tmp/salt.{{grains.id}}.zip',
'/tmp/dest',
excludes='/tmp/tmpePe8yO,/tmp/tmpLeSw1A',
template='jinja'
template='jinja',
extract_perms=False
)
self.assertEqual(['salt'], ret)

View File

@@ -45,7 +45,7 @@ class ArchiveTestCase(TestCase):
archive.extracted tar options
'''
source = 'file.tar.gz'
source = '/tmp/file.tar.gz'
tmp_dir = os.path.join(tempfile.gettempdir(), 'test_archive', '')
test_tar_opts = [
'--no-anchored foo',
@ -66,47 +66,67 @@ class ArchiveTestCase(TestCase):
mock_false = MagicMock(return_value=False)
ret = {'stdout': ['saltines', 'cheese'], 'stderr': 'biscuits', 'retcode': '31337', 'pid': '1337'}
mock_run = MagicMock(return_value=ret)
mock_source_list = MagicMock(return_value=source)
mock_source_list = MagicMock(return_value=(source, None))
state_single_mock = MagicMock(return_value={'local': {'result': True}})
list_mock = MagicMock(return_value={
'dirs': [],
'files': ['saltines', 'cheese'],
'top_level_dirs': [],
'top_level_files': ['saltines', 'cheese'],
})
with patch('os.path.exists', mock_true):
with patch.dict(archive.__opts__, {'test': False,
'cachedir': tmp_dir}):
with patch.dict(archive.__salt__, {'file.directory_exists': mock_false,
'file.file_exists': mock_false,
'file.makedirs': mock_true,
'cmd.run_all': mock_run,
'file.source_list': mock_source_list}):
filename = os.path.join(
tmp_dir,
'files/test/_tmp_test_archive_.tar'
)
for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts):
ret = archive.extracted(tmp_dir,
source,
'tar',
tar_options=test_opts)
ret_opts.append(filename)
mock_run.assert_called_with(ret_opts, cwd=tmp_dir, python_shell=False)
with patch.dict(archive.__opts__, {'test': False,
'cachedir': tmp_dir}):
with patch.dict(archive.__salt__, {'file.directory_exists': mock_false,
'file.file_exists': mock_false,
'state.single': state_single_mock,
'file.makedirs': mock_true,
'cmd.run_all': mock_run,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
filename = os.path.join(
tmp_dir,
'files/test/_tmp_file.tar.gz'
)
for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts):
ret = archive.extracted(tmp_dir,
source,
options=test_opts,
enforce_toplevel=False)
ret_opts.append(filename)
mock_run.assert_called_with(ret_opts, cwd=tmp_dir, python_shell=False)
def test_tar_gnutar(self):
'''
Tests the call of extraction with gnutar
'''
gnutar = MagicMock(return_value='tar (GNU tar)')
source = 'GNU tar'
source = '/tmp/foo.tar.gz'
missing = MagicMock(return_value=False)
nop = MagicMock(return_value=True)
state_single_mock = MagicMock(return_value={'local': {'result': True}})
run_all = MagicMock(return_value={'retcode': 0, 'stdout': 'stdout', 'stderr': 'stderr'})
mock_source_list = MagicMock(return_value=source)
mock_source_list = MagicMock(return_value=(source, None))
list_mock = MagicMock(return_value={
'dirs': [],
'files': ['stdout'],
'top_level_dirs': [],
'top_level_files': ['stdout'],
})
with patch.dict(archive.__salt__, {'cmd.run': gnutar,
'file.directory_exists': missing,
'file.file_exists': missing,
'state.single': nop,
'state.single': state_single_mock,
'file.makedirs': nop,
'cmd.run_all': run_all,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
ret = archive.extracted('/tmp/out', '/tmp/foo.tar.gz', 'tar', tar_options='xvzf', keep=True)
ret = archive.extracted('/tmp/out',
source,
options='xvzf',
enforce_toplevel=False,
keep=True)
self.assertEqual(ret['changes']['extracted_files'], 'stdout')
def test_tar_bsdtar(self):
@@ -114,20 +134,32 @@ class ArchiveTestCase(TestCase):
Tests the call of extraction with bsdtar
'''
bsdtar = MagicMock(return_value='tar (bsdtar)')
source = 'bsdtar'
source = '/tmp/foo.tar.gz'
missing = MagicMock(return_value=False)
nop = MagicMock(return_value=True)
state_single_mock = MagicMock(return_value={'local': {'result': True}})
run_all = MagicMock(return_value={'retcode': 0, 'stdout': 'stdout', 'stderr': 'stderr'})
mock_source_list = MagicMock(return_value=source)
mock_source_list = MagicMock(return_value=(source, None))
list_mock = MagicMock(return_value={
'dirs': [],
'files': ['stderr'],
'top_level_dirs': [],
'top_level_files': ['stderr'],
})
with patch.dict(archive.__salt__, {'cmd.run': bsdtar,
'file.directory_exists': missing,
'file.file_exists': missing,
'state.single': nop,
'state.single': state_single_mock,
'file.makedirs': nop,
'cmd.run_all': run_all,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
ret = archive.extracted('/tmp/out', '/tmp/foo.tar.gz', 'tar', tar_options='xvzf', keep=True)
ret = archive.extracted('/tmp/out',
source,
options='xvzf',
enforce_toplevel=False,
keep=True)
self.assertEqual(ret['changes']['extracted_files'], 'stderr')
if __name__ == '__main__':

View File

@@ -3,11 +3,18 @@
# Import python libs
from __future__ import absolute_import
from datetime import datetime
from dateutil.relativedelta import relativedelta
import json
import pprint
import tempfile
try:
from dateutil.relativedelta import relativedelta
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
NO_DATEUTIL_REASON = 'python-dateutil is not installed'
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import destructiveTest, ensure_in_syspath
@@ -1655,6 +1662,7 @@ class FileTestCase(TestCase):
self.assertTrue(filestate.mod_run_check_cmd(cmd, filename))
@skipIf(not HAS_DATEUTIL, NO_DATEUTIL_REASON)
def test_retention_schedule(self):
'''
Test to execute the retention_schedule logic.