Merge pull request #20594 from basepi/merge-forward-develop

Merge forward from 2015.2 to develop

Commit a572be2ca3
@@ -15,7 +15,8 @@ Version 2014.7.2 is a bugfix release for :doc:`2014.7.0
   to lowercase their npm package names for them. The :py:mod:`npm module
   <salt.modules.npm>` was never affected by mandatory lowercasing.
   (:issue:`20329`)
-- Deprecate the `activate` parameter for pip.install for both the
+- Deprecate the ``activate`` parameter for pip.install for both the
   :py:mod:`module <salt.modules.pip>` and the :py:mod:`state <salt.state.pip>`.
-  If `bin_env` is given and points to a virtualenv, there is no need to
+  If ``bin_env`` is given and points to a virtualenv, there is no need to
   activate that virtualenv in a shell for pip to install to the virtualenv.
+- Fix a file-locking bug in gitfs (:issue:`18839`)
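The ``activate`` deprecation above rests on a property of pip itself: a virtualenv's own pip installs into the environment it lives in, so activating the virtualenv in a shell first adds nothing. A minimal sketch of the idea (the virtualenv path is a made-up example, not anything from this changeset):

    import subprocess

    def pip_install(pkg, bin_env='/srv/venv/bin/pip'):
        # bin_env names a hypothetical virtualenv's pip. pip derives its
        # target environment from its own location, so no 'activate' step
        # is needed before installing.
        subprocess.check_call([bin_env, 'install', pkg])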
@@ -452,6 +452,8 @@ class Fileserver(object):
            fstr = '{0}.file_list'.format(fsb)
            if fstr in self.servers:
                ret.update(self.servers[fstr](load))
+        # upgrade all set elements to a common encoding
+        ret = [salt.utils.sdecode(f) for f in ret]
         # some *fs do not handle prefix. Ensure it is filtered
         prefix = load.get('prefix', '').strip('/')
         if prefix != '':
@@ -478,6 +480,8 @@ class Fileserver(object):
            fstr = '{0}.file_list_emptydirs'.format(fsb)
            if fstr in self.servers:
                ret.update(self.servers[fstr](load))
+        # upgrade all set elements to a common encoding
+        ret = [salt.utils.sdecode(f) for f in ret]
         # some *fs do not handle prefix. Ensure it is filtered
         prefix = load.get('prefix', '').strip('/')
         if prefix != '':
@@ -504,6 +508,8 @@ class Fileserver(object):
            fstr = '{0}.dir_list'.format(fsb)
            if fstr in self.servers:
                ret.update(self.servers[fstr](load))
+        # upgrade all set elements to a common encoding
+        ret = [salt.utils.sdecode(f) for f in ret]
         # some *fs do not handle prefix. Ensure it is filtered
         prefix = load.get('prefix', '').strip('/')
         if prefix != '':
@@ -530,6 +536,10 @@ class Fileserver(object):
            symlstr = '{0}.symlink_list'.format(fsb)
            if symlstr in self.servers:
                ret = self.servers[symlstr](load)
+        # upgrade all set elements to a common encoding
+        ret = dict([
+            (salt.utils.sdecode(x), salt.utils.sdecode(y)) for x, y in ret.items()
+        ])
         # some *fs do not handle prefix. Ensure it is filtered
         prefix = load.get('prefix', '').strip('/')
         if prefix != '':
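Each of the four hunks above normalizes the returned paths to a single text type before the prefix filter runs; under Python 2 a mixed set of str and unicode paths would compare and deduplicate unreliably. A rough stand-in for what an sdecode-style helper does (assumed behavior for illustration, not salt.utils.sdecode itself):

    def sdecode(value, encoding='utf-8'):
        # Decode byte strings to text; pass text through untouched.
        if isinstance(value, bytes):
            try:
                return value.decode(encoding)
            except UnicodeDecodeError:
                return value.decode('latin-1')
        return value

    paths = [b'top.sls', u'fil\xe9.sls']
    print([sdecode(p) for p in paths])  # a uniformly-text list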
@@ -44,7 +44,9 @@ Walkthrough <tutorial-gitfs>`.
 # Import python libs
 from __future__ import absolute_import
 import copy
-import distutils.version  # pylint: disable=import-error,no-name-in-module
+import contextlib
+import distutils.version  # pylint: disable=E0611
+import fcntl
 import glob
 import hashlib
 import logging
@@ -894,7 +896,8 @@ def purge_cache():
     remove_dirs = []
     for repo in init():
         try:
-            remove_dirs.remove(repo['hash'])
+            with _aquire_update_lock_for_repo(repo):
+                remove_dirs.remove(repo['hash'])
         except ValueError:
             pass
     remove_dirs = [os.path.join(bp_, rdir) for rdir in remove_dirs
@@ -906,6 +909,37 @@ def purge_cache():
     return False
 
 
+@contextlib.contextmanager
+def _aquire_update_lock_for_repo(repo):
+    provider = _get_provider()
+
+    if provider == 'gitpython':
+        working_dir = repo['repo'].working_dir
+    elif provider == 'pygit2':
+        working_dir = repo['repo'].workdir
+    elif provider == 'dulwich':
+        working_dir = repo['repo'].path
+
+    with wait_for_write_lock(os.path.join(working_dir, 'update.lk')):
+        yield
+
+
+@contextlib.contextmanager
+def wait_for_write_lock(filename):
+    fhandle = open(filename, 'w')
+
+    if salt.utils.is_fcntl_available(check_sunos=True):
+        fcntl.flock(fhandle.fileno(), fcntl.LOCK_EX)
+    try:
+        yield
+    finally:
+        if salt.utils.is_fcntl_available(check_sunos=True):
+            fcntl.flock(fhandle.fileno(), fcntl.LOCK_UN)
+
+        fhandle.close()
+        os.remove(filename)
+
+
 def update():
     '''
     Execute a git fetch on all of the repos
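wait_for_write_lock blocks in LOCK_EX until any other holder of update.lk releases it, then deletes the lock file on the way out, which is what serializes concurrent gitfs updates. The same advisory-locking pattern in self-contained form (a standalone POSIX sketch, not using Salt's helpers):

    import contextlib
    import fcntl
    import os

    @contextlib.contextmanager
    def file_lock(path):
        fhandle = open(path, 'w')
        fcntl.flock(fhandle.fileno(), fcntl.LOCK_EX)  # blocks until free
        try:
            yield
        finally:
            fcntl.flock(fhandle.fileno(), fcntl.LOCK_UN)
            fhandle.close()
            os.remove(path)

    with file_lock('/tmp/update.lk'):
        pass  # only one process at a time reaches this block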
@@ -927,98 +961,93 @@ def update():
         # origin is just a url here, there is no origin object
         origin = repo['url']
-        working_dir = repo['repo'].path
-        lk_fn = os.path.join(working_dir, 'update.lk')
-        with salt.utils.fopen(lk_fn, 'w+') as fp_:
-            fp_.write(str(pid))
-        try:
-            log.debug('Fetching from {0}'.format(repo['url']))
-            if provider == 'gitpython':
-                try:
-                    fetch_results = origin.fetch()
-                except AssertionError:
-                    fetch_results = origin.fetch()
-                for fetch in fetch_results:
-                    if fetch.old_commit is not None:
-                        data['changed'] = True
-            elif provider == 'pygit2':
-                try:
-                    origin.credentials = repo['credentials']
-                except KeyError:
-                    # No credentials configured for this repo
-                    pass
-                fetch = origin.fetch()
-                try:
-                    # pygit2.Remote.fetch() returns a dict in pygit2 < 0.21.0
-                    received_objects = fetch['received_objects']
-                except (AttributeError, TypeError):
-                    # pygit2.Remote.fetch() returns a class instance in
-                    # pygit2 >= 0.21.0
-                    received_objects = fetch.received_objects
-                log.debug(
-                    'Gitfs received {0} objects for remote {1}'
-                    .format(received_objects, repo['url'])
-                )
-                if received_objects:
-                    data['changed'] = True
-            elif provider == 'dulwich':
-                client, path = \
-                    dulwich.client.get_transport_and_path_from_url(
-                        origin, thin_packs=True
-                    )
-                refs_pre = repo['repo'].get_refs()
-                try:
-                    refs_post = client.fetch(path, repo['repo'])
-                except dulwich.errors.NotGitRepository:
-                    log.critical(
-                        'Dulwich does not recognize remote {0} as a valid '
-                        'remote URL. Perhaps it is missing \'.git\' at the '
-                        'end.'.format(repo['url'])
-                    )
-                    continue
-                except KeyError:
-                    log.critical(
-                        'Local repository cachedir {0!r} (corresponding '
-                        'remote: {1}) has been corrupted. Salt will now '
-                        'attempt to remove the local checkout to allow it to '
-                        'be re-initialized in the next fileserver cache '
-                        'update.'
-                        .format(repo['cachedir'], repo['url'])
-                    )
-                    try:
-                        salt.utils.rm_rf(repo['cachedir'])
-                    except OSError as exc:
-                        log.critical(
-                            'Unable to remove {0!r}: {1}'
-                            .format(repo['cachedir'], exc)
-                        )
-                    continue
-                if refs_post is None:
-                    # Empty repository
-                    log.warning(
-                        'Gitfs remote {0!r} is an empty repository and will '
-                        'be skipped.'.format(origin)
-                    )
-                    continue
-                if refs_pre != refs_post:
-                    data['changed'] = True
-                    # Update local refs
-                    for ref in _dulwich_env_refs(refs_post):
-                        repo['repo'][ref] = refs_post[ref]
-                    # Prune stale refs
-                    for ref in repo['repo'].get_refs():
-                        if ref not in refs_post:
-                            del repo['repo'][ref]
-        except Exception as exc:
-            # Do not use {0!r} in the error message, as exc is not a string
-            log.error(
-                'Exception \'{0}\' caught while fetching gitfs remote {1}'
-                .format(exc, repo['url']),
-                exc_info_on_loglevel=logging.DEBUG
-            )
-        try:
-            os.remove(lk_fn)
-        except (IOError, OSError):
-            pass
+        with _aquire_update_lock_for_repo(repo):
+            try:
+                log.debug('Fetching from {0}'.format(repo['url']))
+                if provider == 'gitpython':
+                    try:
+                        fetch_results = origin.fetch()
+                    except AssertionError:
+                        fetch_results = origin.fetch()
+                    for fetch in fetch_results:
+                        if fetch.old_commit is not None:
+                            data['changed'] = True
+                elif provider == 'pygit2':
+                    try:
+                        origin.credentials = repo['credentials']
+                    except KeyError:
+                        # No credentials configured for this repo
+                        pass
+                    fetch = origin.fetch()
+                    try:
+                        # pygit2.Remote.fetch() returns a dict in pygit2 < 0.21.0
+                        received_objects = fetch['received_objects']
+                    except (AttributeError, TypeError):
+                        # pygit2.Remote.fetch() returns a class instance in
+                        # pygit2 >= 0.21.0
+                        received_objects = fetch.received_objects
+                    log.debug(
+                        'Gitfs received {0} objects for remote {1}'
+                        .format(received_objects, repo['url'])
+                    )
+                    if received_objects:
+                        data['changed'] = True
+                elif provider == 'dulwich':
+                    client, path = \
+                        dulwich.client.get_transport_and_path_from_url(
+                            origin, thin_packs=True
+                        )
+                    refs_pre = repo['repo'].get_refs()
+                    try:
+                        refs_post = client.fetch(path, repo['repo'])
+                    except dulwich.errors.NotGitRepository:
+                        log.critical(
+                            'Dulwich does not recognize remote {0} as a valid '
+                            'remote URL. Perhaps it is missing \'.git\' at the '
+                            'end.'.format(repo['url'])
+                        )
+                        continue
+                    except KeyError:
+                        log.critical(
+                            'Local repository cachedir {0!r} (corresponding '
+                            'remote: {1}) has been corrupted. Salt will now '
+                            'attempt to remove the local checkout to allow it to '
+                            'be re-initialized in the next fileserver cache '
+                            'update.'
+                            .format(repo['cachedir'], repo['url'])
+                        )
+                        try:
+                            salt.utils.rm_rf(repo['cachedir'])
+                        except OSError as exc:
+                            log.critical(
+                                'Unable to remove {0!r}: {1}'
+                                .format(repo['cachedir'], exc)
+                            )
+                        continue
+                    if refs_post is None:
+                        # Empty repository
+                        log.warning(
+                            'Gitfs remote {0!r} is an empty repository and will '
+                            'be skipped.'.format(origin)
+                        )
+                        continue
+                    if refs_pre != refs_post:
+                        data['changed'] = True
+                        # Update local refs
+                        for ref in _dulwich_env_refs(refs_post):
+                            repo['repo'][ref] = refs_post[ref]
+                        # Prune stale refs
+                        for ref in repo['repo'].get_refs():
+                            if ref not in refs_post:
+                                del repo['repo'][ref]
+            except Exception as exc:
+                # Do not use {0!r} in the error message, as exc is not a string
+                log.error(
+                    'Exception \'{0}\' caught while fetching gitfs remote {1}'
+                    .format(exc, repo['url']),
+                    exc_info_on_loglevel=logging.DEBUG
+                )
 
     env_cache = os.path.join(__opts__['cachedir'], 'gitfs/envs.p')
     if data.get('changed', False) is True or not os.path.isfile(env_cache):
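The pygit2 branch above copes with an API break by trying the old shape first rather than comparing versions: subscripting works on the dict that pygit2 < 0.21.0 returned, attribute access on the object returned afterwards. The same duck-typing pattern in isolation (fetch_result stands in for either return shape):

    def count_received(fetch_result):
        # Old API: a dict with a 'received_objects' key.
        # New API: an object with a .received_objects attribute.
        try:
            return fetch_result['received_objects']
        except (AttributeError, TypeError):
            return fetch_result.received_objects

    print(count_received({'received_objects': 3}))                    # old-style
    print(count_received(type('TP', (), {'received_objects': 3})()))  # new-style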
@@ -1179,155 +1208,157 @@ def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
                            '{0}.lk'.format(path))
     destdir = os.path.dirname(dest)
     hashdir = os.path.dirname(blobshadest)
-    if not os.path.isdir(destdir):
-        try:
-            os.makedirs(destdir)
-        except OSError:
-            # Path exists and is a file, remove it and retry
-            os.remove(destdir)
-            os.makedirs(destdir)
-    if not os.path.isdir(hashdir):
-        try:
-            os.makedirs(hashdir)
-        except OSError:
-            # Path exists and is a file, remove it and retry
-            os.remove(hashdir)
-            os.makedirs(hashdir)
 
     for repo in init():
-        if repo['mountpoint'] \
-                and not path.startswith(repo['mountpoint'] + os.path.sep):
-            continue
-        repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
-        if repo['root']:
-            repo_path = os.path.join(repo['root'], repo_path)
-
-        blob = None
-        depth = 0
-        if provider == 'gitpython':
-            tree = _get_tree_gitpython(repo, tgt_env)
-            if not tree:
-                # Branch/tag/SHA not found in repo, try the next
-                continue
-            while True:
-                depth += 1
-                if depth > SYMLINK_RECURSE_DEPTH:
-                    break
-                try:
-                    file_blob = tree / repo_path
-                    if stat.S_ISLNK(file_blob.mode):
-                        # Path is a symlink. The blob data corresponding to
-                        # this path's object ID will be the target of the
-                        # symlink. Follow the symlink and set repo_path to the
-                        # location indicated in the blob data.
-                        stream = six.StringIO()
-                        file_blob.stream_data(stream)
-                        stream.seek(0)
-                        link_tgt = stream.read()
-                        stream.close()
-                        repo_path = os.path.normpath(
-                            os.path.join(os.path.dirname(repo_path), link_tgt)
-                        )
-                    else:
-                        blob = file_blob
-                        break
-                except KeyError:
-                    # File not found or repo_path points to a directory
-                    break
-            if blob is None:
-                continue
-            blob_hexsha = blob.hexsha
-
-        elif provider == 'pygit2':
-            tree = _get_tree_pygit2(repo, tgt_env)
-            if not tree:
-                # Branch/tag/SHA not found in repo, try the next
-                continue
-            while True:
-                depth += 1
-                if depth > SYMLINK_RECURSE_DEPTH:
-                    break
-                try:
-                    if stat.S_ISLNK(tree[repo_path].filemode):
-                        # Path is a symlink. The blob data corresponding to this
-                        # path's object ID will be the target of the symlink. Follow
-                        # the symlink and set repo_path to the location indicated
-                        # in the blob data.
-                        link_tgt = repo['repo'][tree[repo_path].oid].data
-                        repo_path = os.path.normpath(
-                            os.path.join(os.path.dirname(repo_path), link_tgt)
-                        )
-                    else:
-                        oid = tree[repo_path].oid
-                        blob = repo['repo'][oid]
-                except KeyError:
-                    break
-            if blob is None:
-                continue
-            blob_hexsha = blob.hex
-
-        elif provider == 'dulwich':
-            while True:
-                depth += 1
-                if depth > SYMLINK_RECURSE_DEPTH:
-                    break
-                prefix_dirs, _, filename = repo_path.rpartition(os.path.sep)
-                tree = _get_tree_dulwich(repo, tgt_env)
-                tree = _dulwich_walk_tree(repo['repo'], tree, prefix_dirs)
-                if not isinstance(tree, dulwich.objects.Tree):
-                    # Branch/tag/SHA not found in repo
-                    break
-                try:
-                    mode, oid = tree[filename]
-                    if stat.S_ISLNK(mode):
-                        # Path is a symlink. The blob data corresponding to
-                        # this path's object ID will be the target of the
-                        # symlink. Follow the symlink and set repo_path to the
-                        # location indicated in the blob data.
-                        link_tgt = repo['repo'].get_object(oid).as_raw_string()
-                        repo_path = os.path.normpath(
-                            os.path.join(os.path.dirname(repo_path), link_tgt)
-                        )
-                    else:
-                        blob = repo['repo'].get_object(oid)
-                        break
-                except KeyError:
-                    break
-            if blob is None:
-                continue
-            blob_hexsha = blob.sha().hexdigest()
-
-        salt.fileserver.wait_lock(lk_fn, dest)
-        if os.path.isfile(blobshadest) and os.path.isfile(dest):
-            with salt.utils.fopen(blobshadest, 'r') as fp_:
-                sha = fp_.read()
-                if sha == blob_hexsha:
-                    fnd['rel'] = path
-                    fnd['path'] = dest
-                    return fnd
-        with salt.utils.fopen(lk_fn, 'w+') as fp_:
-            fp_.write('')
-        for filename in glob.glob(hashes_glob):
-            try:
-                os.remove(filename)
-            except Exception:
-                pass
-        with salt.utils.fopen(dest, 'w+') as fp_:
-            if provider == 'gitpython':
-                blob.stream_data(fp_)
-            elif provider == 'pygit2':
-                fp_.write(blob.data)
-            elif provider == 'dulwich':
-                fp_.write(blob.as_raw_string())
-        with salt.utils.fopen(blobshadest, 'w+') as fp_:
-            fp_.write(blob_hexsha)
-        try:
-            os.remove(lk_fn)
-        except (OSError, IOError):
-            pass
-        fnd['rel'] = path
-        fnd['path'] = dest
-        return fnd
+        with _aquire_update_lock_for_repo(repo):
+            if not os.path.isdir(destdir):
+                try:
+                    os.makedirs(destdir)
+                except OSError:
+                    # Path exists and is a file, remove it and retry
+                    os.remove(destdir)
+                    os.makedirs(destdir)
+            if not os.path.isdir(hashdir):
+                try:
+                    os.makedirs(hashdir)
+                except OSError:
+                    # Path exists and is a file, remove it and retry
+                    os.remove(hashdir)
+                    os.makedirs(hashdir)
+
+            if repo['mountpoint'] \
+                    and not path.startswith(repo['mountpoint'] + os.path.sep):
+                continue
+            repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
+            if repo['root']:
+                repo_path = os.path.join(repo['root'], repo_path)
+
+            blob = None
+            depth = 0
+            if provider == 'gitpython':
+                tree = _get_tree_gitpython(repo, tgt_env)
+                if not tree:
+                    # Branch/tag/SHA not found in repo, try the next
+                    continue
+                while True:
+                    depth += 1
+                    if depth > SYMLINK_RECURSE_DEPTH:
+                        break
+                    try:
+                        file_blob = tree / repo_path
+                        if stat.S_ISLNK(file_blob.mode):
+                            # Path is a symlink. The blob data corresponding to
+                            # this path's object ID will be the target of the
+                            # symlink. Follow the symlink and set repo_path to the
+                            # location indicated in the blob data.
+                            stream = six.StringIO()
+                            file_blob.stream_data(stream)
+                            stream.seek(0)
+                            link_tgt = stream.read()
+                            stream.close()
+                            repo_path = os.path.normpath(
+                                os.path.join(os.path.dirname(repo_path), link_tgt)
+                            )
+                        else:
+                            blob = file_blob
+                            break
+                    except KeyError:
+                        # File not found or repo_path points to a directory
+                        break
+                if blob is None:
+                    continue
+                blob_hexsha = blob.hexsha
+
+            elif provider == 'pygit2':
+                tree = _get_tree_pygit2(repo, tgt_env)
+                if not tree:
+                    # Branch/tag/SHA not found in repo, try the next
+                    continue
+                while True:
+                    depth += 1
+                    if depth > SYMLINK_RECURSE_DEPTH:
+                        break
+                    try:
+                        if stat.S_ISLNK(tree[repo_path].filemode):
+                            # Path is a symlink. The blob data corresponding to this
+                            # path's object ID will be the target of the symlink. Follow
+                            # the symlink and set repo_path to the location indicated
+                            # in the blob data.
+                            link_tgt = repo['repo'][tree[repo_path].oid].data
+                            repo_path = os.path.normpath(
+                                os.path.join(os.path.dirname(repo_path), link_tgt)
+                            )
+                        else:
+                            oid = tree[repo_path].oid
+                            blob = repo['repo'][oid]
+                    except KeyError:
+                        break
+                if blob is None:
+                    continue
+                blob_hexsha = blob.hex
+
+            elif provider == 'dulwich':
+                while True:
+                    depth += 1
+                    if depth > SYMLINK_RECURSE_DEPTH:
+                        break
+                    prefix_dirs, _, filename = repo_path.rpartition(os.path.sep)
+                    tree = _get_tree_dulwich(repo, tgt_env)
+                    tree = _dulwich_walk_tree(repo['repo'], tree, prefix_dirs)
+                    if not isinstance(tree, dulwich.objects.Tree):
+                        # Branch/tag/SHA not found in repo
+                        break
+                    try:
+                        mode, oid = tree[filename]
+                        if stat.S_ISLNK(mode):
+                            # Path is a symlink. The blob data corresponding to
+                            # this path's object ID will be the target of the
+                            # symlink. Follow the symlink and set repo_path to the
+                            # location indicated in the blob data.
+                            link_tgt = repo['repo'].get_object(oid).as_raw_string()
+                            repo_path = os.path.normpath(
+                                os.path.join(os.path.dirname(repo_path), link_tgt)
+                            )
+                        else:
+                            blob = repo['repo'].get_object(oid)
+                            break
+                    except KeyError:
+                        break
+                if blob is None:
+                    continue
+                blob_hexsha = blob.sha().hexdigest()
+
+            salt.fileserver.wait_lock(lk_fn, dest)
+            if os.path.isfile(blobshadest) and os.path.isfile(dest):
+                with salt.utils.fopen(blobshadest, 'r') as fp_:
+                    sha = fp_.read()
+                    if sha == blob_hexsha:
+                        fnd['rel'] = path
+                        fnd['path'] = dest
+                        return fnd
+            with salt.utils.fopen(lk_fn, 'w+') as fp_:
+                fp_.write('')
+            for filename in glob.glob(hashes_glob):
+                try:
+                    os.remove(filename)
+                except Exception:
+                    pass
+            with salt.utils.fopen(dest, 'w+') as fp_:
+                if provider == 'gitpython':
+                    blob.stream_data(fp_)
+                elif provider == 'pygit2':
+                    fp_.write(blob.data)
+                elif provider == 'dulwich':
+                    fp_.write(blob.as_raw_string())
+            with salt.utils.fopen(blobshadest, 'w+') as fp_:
+                fp_.write(blob_hexsha)
+            try:
+                os.remove(lk_fn)
+            except (OSError, IOError):
+                pass
+            fnd['rel'] = path
+            fnd['path'] = dest
+            return fnd
     return fnd
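All three providers follow in-repo symlinks by re-reading the blob as a new path, and SYMLINK_RECURSE_DEPTH caps that loop so a cycle of symlinks cannot spin forever. The control flow reduced to its skeleton (lookup and the depth value are placeholders, not Salt code):

    SYMLINK_RECURSE_DEPTH = 5  # illustrative cap only

    def resolve(lookup, repo_path):
        # lookup(path) -> (is_link, payload); payload is the link target
        # for symlinks, the file blob otherwise.
        depth = 0
        while True:
            depth += 1
            if depth > SYMLINK_RECURSE_DEPTH:
                return None  # bail out on symlink cycles
            is_link, payload = lookup(repo_path)
            if not is_link:
                return payload
            repo_path = payload  # follow the link and retry

    print(resolve(lambda p: (False, 'blob:' + p), 'init.sls'))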
@@ -485,6 +485,8 @@ def unzip(zip_file, dest, excludes=None, template=None, runas=None):
 
         salt '*' archive.unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
     '''
+    if not excludes:
+        excludes = []
     if runas:
         euid = os.geteuid()
         egid = os.getegid()
@@ -510,9 +512,6 @@ def unzip(zip_file, dest, excludes=None, template=None, runas=None):
         cleaned_files = []
         with zipfile.ZipFile(zip_file) as zfile:
             files = zfile.namelist()
-            if excludes is None:
-                zfile.extractall(dest)
-                return files
 
             if isinstance(excludes, string_types):
                 excludes = [x.strip() for x in excludes.split(',')]
@@ -522,6 +521,13 @@ def unzip(zip_file, dest, excludes=None, template=None, runas=None):
             cleaned_files.extend([x for x in files if x not in excludes])
             for target in cleaned_files:
                 if target not in excludes:
+                    if salt.utils.is_windows() is False:
+                        info = zfile.getinfo(target)
+                        # Check if zipped file is a symbolic link
+                        if info.external_attr == 2716663808L:
+                            source = zfile.read(target)
+                            os.symlink(source, os.path.join(dest, target))
+                            continue
                     zfile.extract(target, dest)
     except Exception as exc:
         pass
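The magic constant 2716663808L in the last hunk is 0xA1ED0000: a zip entry stores the Unix st_mode in the upper 16 bits of external_attr, and 0xA1ED is mode 0120755, a symlink. Decoding it through the stat module shows the equivalence (a sketch, not the module's code):

    import stat

    def is_zip_symlink(external_attr):
        # The Unix mode lives in the high 16 bits of external_attr.
        return stat.S_ISLNK(external_attr >> 16)

    print(is_zip_symlink(0xA1ED0000))  # True: 0o120755 is a symlink mode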
@@ -37,6 +37,21 @@ def _iptables_cmd(family='ipv4'):
     return salt.utils.which('iptables')
 
 
+def _has_option(option, family='ipv4'):
+    '''
+    Return truth of whether iptables has `option`. For example:
+
+    .. code-block:: python
+
+        _has_option('--wait')
+        _has_option('--check', family='ipv6')
+    '''
+    cmd = '{0} --help'.format(_iptables_cmd(family))
+    if option in __salt__['cmd.run'](cmd, output_loglevel='quiet'):
+        return True
+    return False
+
+
 def _conf(family='ipv4'):
     '''
     Some distros have a specific location for config files
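_has_option turns the capability question into a substring test against the iptables --help text, which is how newer flags such as --wait get adopted without parsing version numbers. The probe pattern standalone (binary and flag here are only examples):

    import subprocess

    def has_option(binary, option):
        # Crude but effective feature probe: supported flags appear in
        # the --help output, so a substring hit means support.
        try:
            out = subprocess.check_output([binary, '--help'],
                                          stderr=subprocess.STDOUT)
        except (OSError, subprocess.CalledProcessError) as exc:
            out = getattr(exc, 'output', b'') or b''
        return option.encode() in out

    wait = '--wait' if has_option('iptables', '--wait') else ''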
@@ -330,8 +345,10 @@ def build_rule(table=None, chain=None, command=None, position='', full=None, fam
     else:
         flag = '--'
 
-        return '{0} -t {1} {2}{3} {4} {5} {6}'.format(_iptables_cmd(family),
-            table, flag, command, chain, position, rule)
+        wait = '--wait' if _has_option('--wait', family) else ''
+
+        return '{0} {1} -t {2} {3}{4} {5} {6} {7}'.format(_iptables_cmd(family),
+            wait, table, flag, command, chain, position, rule)
 
     return rule
 
@@ -438,7 +455,9 @@ def set_policy(table='filter', chain=None, policy=None, family='ipv4'):
     if not policy:
         return 'Error: Policy needs to be specified'
 
-    cmd = '{0} -t {1} -P {2} {3}'.format(_iptables_cmd(family), table, chain, policy)
+    wait = '--wait' if _has_option('--wait', family) else ''
+    cmd = '{0} {1} -t {2} -P {3} {4}'.format(
+        _iptables_cmd(family), wait, table, chain, policy)
     out = __salt__['cmd.run'](cmd)
     return out
 
@@ -495,11 +514,7 @@ def check(table='filter', chain=None, rule=None, family='ipv4'):
         return 'Error: Rule needs to be specified'
     ipt_cmd = _iptables_cmd(family)
 
-    HAS_CHECK = False
-    if '--check' in __salt__['cmd.run']('{0} --help'.format(ipt_cmd), output_loglevel='quiet'):
-        HAS_CHECK = True
-
-    if HAS_CHECK is False:
+    if _has_option('--check', family):
         _chain_name = hex(uuid.getnode())
 
         # Create temporary table
@@ -576,7 +591,9 @@ def new_chain(table='filter', chain=None, family='ipv4'):
     if not chain:
         return 'Error: Chain needs to be specified'
 
-    cmd = '{0} -t {1} -N {2}'.format(_iptables_cmd(family), table, chain)
+    wait = '--wait' if _has_option('--wait', family) else ''
+    cmd = '{0} {1} -t {2} -N {3}'.format(
+        _iptables_cmd(family), wait, table, chain)
     out = __salt__['cmd.run'](cmd)
 
     if not out:
@@ -603,7 +620,9 @@ def delete_chain(table='filter', chain=None, family='ipv4'):
     if not chain:
         return 'Error: Chain needs to be specified'
 
-    cmd = '{0} -t {1} -X {2}'.format(_iptables_cmd(family), table, chain)
+    wait = '--wait' if _has_option('--wait', family) else ''
+    cmd = '{0} {1} -t {2} -X {3}'.format(
+        _iptables_cmd(family), wait, table, chain)
     out = __salt__['cmd.run'](cmd)
 
     if not out:
@@ -637,7 +656,9 @@ def append(table='filter', chain=None, rule=None, family='ipv4'):
     if not rule:
         return 'Error: Rule needs to be specified'
 
-    cmd = '{0} -t {1} -A {2} {3}'.format(_iptables_cmd(family), table, chain, rule)
+    wait = '--wait' if _has_option('--wait', family) else ''
+    cmd = '{0} {1} -t {2} -A {3} {4}'.format(
+        _iptables_cmd(family), wait, table, chain, rule)
     out = __salt__['cmd.run'](cmd)
     if len(out) == 0:
         return True
@@ -683,7 +704,9 @@ def insert(table='filter', chain=None, position=None, rule=None, family='ipv4'):
         size = len(rules[table][chain]['rules'])
         position = (size + position) + 1
 
-    cmd = '{0} -t {1} -I {2} {3} {4}'.format(_iptables_cmd(family), table, chain, position, rule)
+    wait = '--wait' if _has_option('--wait', family) else ''
+    cmd = '{0} {1} -t {2} -I {3} {4} {5}'.format(
+        _iptables_cmd(family), wait, table, chain, position, rule)
     out = __salt__['cmd.run'](cmd)
     return out
 
@@ -719,7 +742,9 @@ def delete(table, chain=None, position=None, rule=None, family='ipv4'):
     if position:
         rule = position
 
-    cmd = '{0} -t {1} -D {2} {3}'.format(_iptables_cmd(family), table, chain, rule)
+    wait = '--wait' if _has_option('--wait', family) else ''
+    cmd = '{0} {1} -t {2} -D {3} {4}'.format(
+        _iptables_cmd(family), wait, table, chain, rule)
    out = __salt__['cmd.run'](cmd)
    return out
 
@@ -739,10 +764,8 @@ def flush(table='filter', chain='', family='ipv4'):
         salt '*' iptables.flush filter INPUT family=ipv6
     '''
 
-    if chain:
-        cmd = '{0} -t {1} -F {2}'.format(_iptables_cmd(family), table, chain)
-    else:
-        cmd = '{0} -t {1} -F'.format(_iptables_cmd(family), table)
+    wait = '--wait' if _has_option('--wait', family) else ''
+    cmd = '{0} {1} -t {2} -F {3}'.format(_iptables_cmd(family), wait, table, chain)
     out = __salt__['cmd.run'](cmd)
     return out
@@ -38,6 +38,8 @@ def _get_pip_bin(bin_env):
         which_result = __salt__['cmd.which_bin'](['pip2', 'pip', 'pip-python'])
         if which_result is None:
             raise CommandNotFoundError('Could not find a `pip` binary')
+        if salt.utils.is_windows():
+            return which_result.encode('string-escape')
         return which_result
 
     # try to get pip bin from env
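The new Windows branch protects the backslashes in the discovered path from being consumed by a later round of escape processing; Python 2's string-escape codec doubles them. A quick illustration (Python 2 only, since that codec was removed in Python 3):

    # Python 2:
    path = 'C:\\Python27\\Scripts\\pip.exe'
    print(path.encode('string-escape'))
    # prints C:\\Python27\\Scripts\\pip.exe -- each backslash doubled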
@@ -917,15 +917,16 @@ delete = remove
 purge = remove
 
 
-def upgrade(jail=None, chroot=None, force=False, local=False, dryrun=False):
+def upgrade(*names, **kwargs):
     '''
-    Upgrade all packages (run a ``pkg upgrade``)
+    Upgrade named or all packages (run a ``pkg upgrade``). If <package name> is
+    omitted, the operation is executed on all packages.
 
     CLI Example:
 
     .. code-block:: bash
 
         salt '*' pkg.upgrade
+        salt '*' pkg.upgrade <package name>
 
     jail
         Audit packages within the specified jail
@@ -934,7 +935,7 @@ def upgrade(jail=None, chroot=None, force=False, local=False, dryrun=False):
 
     .. code-block:: bash
 
-        salt '*' pkg.upgrade jail=<jail name or id>
+        salt '*' pkg.upgrade <package name> jail=<jail name or id>
 
     chroot
         Audit packages within the specified chroot (ignored if ``jail`` is
@@ -944,7 +945,7 @@ def upgrade(jail=None, chroot=None, force=False, local=False, dryrun=False):
 
     .. code-block:: bash
 
-        salt '*' pkg.upgrade chroot=/path/to/chroot
+        salt '*' pkg.upgrade <package name> chroot=/path/to/chroot
 
 
     Any of the below options can also be used with ``jail`` or ``chroot``.
@@ -956,7 +957,7 @@ def upgrade(jail=None, chroot=None, force=False, local=False, dryrun=False):
 
     .. code-block:: bash
 
-        salt '*' pkg.upgrade force=True
+        salt '*' pkg.upgrade <package name> force=True
 
     local
         Do not update the repository catalogs with ``pkg-update(8)``. A value
@@ -967,7 +968,7 @@ def upgrade(jail=None, chroot=None, force=False, local=False, dryrun=False):
 
     .. code-block:: bash
 
-        salt '*' pkg.upgrade local=True
+        salt '*' pkg.upgrade <package name> local=True
 
     dryrun
         Dry-run mode: show what packages have updates available, but do not
@@ -978,13 +979,18 @@ def upgrade(jail=None, chroot=None, force=False, local=False, dryrun=False):
 
     .. code-block:: bash
 
-        salt '*' pkg.upgrade dryrun=True
+        salt '*' pkg.upgrade <package name> dryrun=True
     '''
     ret = {'changes': {},
            'result': True,
           'comment': '',
           }
 
+    jail = kwargs.pop('jail', None)
+    chroot = kwargs.pop('chroot', None)
+    force = kwargs.pop('force', False)
+    local = kwargs.pop('local', False)
+    dryrun = kwargs.pop('dryrun', False)
     opts = ''
     if force:
         opts += 'f'
@@ -999,7 +1005,7 @@ def upgrade(jail=None, chroot=None, force=False, local=False, dryrun=False):
 
     old = list_pkgs()
     call = __salt__['cmd.run_all'](
-        '{0} upgrade {1}'.format(_pkg(jail, chroot), opts),
+        '{0} upgrade {1} {2}'.format(_pkg(jail, chroot), opts, ' '.join(names)),
         python_shell=False,
         output_loglevel='trace'
     )
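Widening the signature to *names/**kwargs keeps every existing keyword-style call working while letting package names arrive positionally; each known option is then popped out of kwargs with its old default. The pattern in miniature (the function body here is illustrative, not the module's):

    def upgrade(*names, **kwargs):
        # Old keyword arguments still arrive via kwargs; new positional
        # package names arrive via names.
        jail = kwargs.pop('jail', None)
        dryrun = kwargs.pop('dryrun', False)
        return {'names': list(names), 'jail': jail, 'dryrun': dryrun}

    print(upgrade('curl', 'git', dryrun=True))  # new style
    print(upgrade(jail='web1'))                 # old style still valid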
@@ -68,7 +68,12 @@ def _get_proc_name(proc):
 
     It's backward compatible with < 2.0 versions of psutil.
     '''
-    return proc.name() if PSUTIL2 else proc.name
+    ret = []
+    try:
+        ret = proc.name() if PSUTIL2 else proc.name
+    except (psutil.NoSuchProcess, psutil.AccessDenied):
+        pass
+    return ret
 
 
 def _get_proc_status(proc):
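_get_proc_name now also swallows the races where a process exits (NoSuchProcess) or is unreadable (AccessDenied) between enumeration and the name lookup. The PSUTIL2 flag it branches on is typically derived from the library version, along these lines (an assumption about the module's definition, shown for context only):

    import psutil

    # psutil 2.0 turned attributes such as proc.name into methods.
    PSUTIL2 = getattr(psutil, 'version_info', (0,)) >= (2, 0)

    def proc_name(proc):
        try:
            return proc.name() if PSUTIL2 else proc.name
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            return []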
@@ -70,20 +70,30 @@ def _exit_status(retcode):
     ret = {0: 'Successful completion.',
            1: 'An error occurred.',
            2: 'Usage error.'
-           }[retcode]
+          }[retcode]
     return ret
 
 
 def __virtual__():
     '''
-    Makes sure that ZFS is available.
+    Makes sure that ZFS kernel module is loaded.
     '''
-    if _check_zfs():
+    kernel_module_chk = {
+        'FreeBSD': 'kldstat -q -m zfs',
+        'Linux': 'modinfo zfs',
+    }
+    cmd = kernel_module_chk.get(__grains__['kernel'], '')
+    if cmd and salt_cmd.retcode(cmd) == 0:
+        # Build dynamic functions and allow loading module
+        _build_zfs_cmd_list()
         return 'zfs'
     return False
 
 
 def _add_doc(func, doc, prefix='\n\n '):
     '''
     Add documentation to a function
     '''
     if not func.__doc__:
         func.__doc__ = ''
     func.__doc__ += '{0}{1}'.format(prefix, doc)
@@ -124,21 +134,26 @@ def _make_function(cmd_name, doc):
     # At this point return the function we've just defined.
     return _cmd
 
-# Run through all the available commands
-if _check_zfs():
-    available_cmds = _available_commands()
-    for available_cmd in available_cmds:
-
-        # Set the output from _make_function to be 'available_cmd_'.
-        # i.e. 'list' becomes 'list_' in local module.
-        setattr(
-            sys.modules[__name__],
-            '{0}_'.format(available_cmd),
-            _make_function(available_cmd, available_cmds[available_cmd])
-        )
-
-        # Update the function alias so that salt finds the functions properly.
-        __func_alias__['{0}_'.format(available_cmd)] = available_cmd
+def _build_zfs_cmd_list():
+    '''
+    Run through zfs command options, and build equivalent functions dynamically
+    '''
+    # Run through all the available commands
+    if _check_zfs():
+        available_cmds = _available_commands()
+        for available_cmd in available_cmds:
+
+            # Update the function alias so that salt finds the functions properly.
+            __func_alias__['{0}_'.format(available_cmd)] = available_cmd
+            # Set the output from _make_function to be 'available_cmd_'.
+            # i.e. 'list' becomes 'list_' in local module.
+            setattr(
+                sys.modules[__name__],
+                '{0}_'.format(available_cmd),
+                _make_function(available_cmd, available_cmds[available_cmd])
+            )
 
 
 def exists(name):
@@ -281,7 +296,8 @@ def list_(name='', **kwargs):
     '''
     .. versionadded:: Lithium
 
-    Return a list of all datasets or a specified dataset on the system and the values of their used, available, referenced, and mountpoint properties.
+    Return a list of all datasets or a specified dataset on the system and the
+    values of their used, available, referenced, and mountpoint properties.
 
     .. note::
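Deferring the setattr loop into _build_zfs_cmd_list() means the zfs binary is only probed once __virtual__ has confirmed the kernel module, rather than at import time on every host. Generating module-level wrappers that way looks like this stripped-down version (the command table is invented):

    import sys

    __func_alias__ = {}

    def _make_function(cmd_name, doc):
        def _cmd(*args):
            return '{0} {1}'.format(cmd_name, ' '.join(args))
        _cmd.__doc__ = doc
        return _cmd

    def _build_cmd_list(available_cmds):
        for name, doc in available_cmds.items():
            # 'list' would shadow a builtin, so expose 'list_' and alias
            # it back so the loader publishes the short name.
            __func_alias__['{0}_'.format(name)] = name
            setattr(sys.modules[__name__], '{0}_'.format(name),
                    _make_function(name, doc))

    _build_cmd_list({'list': 'List datasets.', 'get': 'Get properties.'})
    print(list_('-t', 'all'))  # calls the generated wrapper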
@@ -49,7 +49,7 @@ def parse_input(args, condition=True):
     _args = []
     _kwargs = {}
     for arg in args:
-        if isinstance(arg, six.string_types) and r'\n' not in arg and '\n' not in arg:
+        if isinstance(arg, six.string_types):
             arg_name, arg_value = parse_kwarg(arg)
             if arg_name:
                 _kwargs[arg_name] = yamlify_arg(arg_value)
@@ -47,6 +47,7 @@ class IptablesTestCase(TestCase):
 
     # 'build_rule' function tests: 1
 
+    @patch.object(iptables, '_has_option', MagicMock(return_value=True))
     def test_build_rule(self):
         '''
         Test if it build a well-formatted iptables rule based on kwargs.
@@ -78,7 +79,7 @@ class IptablesTestCase(TestCase):
                                              match='state', jump='ACCEPT'),
                          'Error: Command needs to be specified')
 
-        ret = '/sbin/iptables -t salt -I INPUT 3 -m state --jump ACCEPT '
+        ret = '/sbin/iptables --wait -t salt -I INPUT 3 -m state --jump ACCEPT '
         with patch.object(iptables, '_iptables_cmd',
                           MagicMock(return_value='/sbin/iptables')):
             self.assertEqual(iptables.build_rule('salt', 'INPUT', command='I',
@@ -161,6 +162,7 @@ class IptablesTestCase(TestCase):
 
     # 'set_policy' function tests: 1
 
+    @patch.object(iptables, '_has_option', MagicMock(return_value=True))
     def test_set_policy(self):
         '''
         Test if it set the current policy for the specified table/chain
@@ -197,6 +199,7 @@ class IptablesTestCase(TestCase):
 
     # 'check' function tests: 1
 
+    @patch.object(iptables, '_has_option', MagicMock(return_value=True))
     def test_check(self):
         '''
         Test if it check for the existence of a rule in the table and chain
@@ -286,6 +289,7 @@ class IptablesTestCase(TestCase):
 
     # 'append' function tests: 1
 
+    @patch.object(iptables, '_has_option', MagicMock(return_value=True))
     def test_append(self):
         '''
         Test if it append a rule to the specified table/chain.
@@ -311,6 +315,7 @@ class IptablesTestCase(TestCase):
 
     # 'insert' function tests: 1
 
+    @patch.object(iptables, '_has_option', MagicMock(return_value=True))
     def test_insert(self):
         '''
         Test if it insert a rule into the specified table/chain,
@@ -340,6 +345,7 @@ class IptablesTestCase(TestCase):
 
     # 'delete' function tests: 1
 
+    @patch.object(iptables, '_has_option', MagicMock(return_value=True))
     def test_delete(self):
         '''
         Test if it delete a rule from the specified table/chain
@@ -358,6 +364,7 @@ class IptablesTestCase(TestCase):
 
     # 'flush' function tests: 1
 
+    @patch.object(iptables, '_has_option', MagicMock(return_value=True))
     def test_flush(self):
         '''
         Test if it flush the chain in the specified table,
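Every touched test pins _has_option to True before running; otherwise the expected command strings would depend on the iptables build present on the test host. The same isolation move in minimal form (module and helper names are placeholders):

    from unittest import TestCase
    from mock import MagicMock, patch

    import mymodule  # hypothetical module exposing _has_option and build_rule

    class BuildRuleTestCase(TestCase):
        @patch.object(mymodule, '_has_option', MagicMock(return_value=True))
        def test_build_rule_includes_wait(self):
            # With _has_option forced True, '--wait' is always emitted and
            # the expected string is deterministic across hosts.
            self.assertIn('--wait', mymodule.build_rule('filter', 'INPUT'))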