merge with develop

This commit is contained in:
Christian McHugh 2017-11-27 22:04:59 +00:00
commit 249987f750
52 changed files with 1071 additions and 250 deletions

View File

@ -1,6 +1,6 @@
---
<% vagrant = system('which vagrant 2>/dev/null >/dev/null') %>
<% version = '2017.7.2' %>
<% version = '2017.7.1' %>
<% platformsfile = ENV['SALT_KITCHEN_PLATFORMS'] || '.kitchen/platforms.yml' %>
<% driverfile = ENV['SALT_KITCHEN_DRIVER'] || '.kitchen/driver.yml' %>
@ -19,6 +19,8 @@ driver:
disable_upstart: false
provision_command:
- echo 'L /run/docker.sock - - - - /docker.sock' > /etc/tmpfiles.d/docker.conf
transport:
name: sftp
<% end %>
sudo: false
@ -164,6 +166,9 @@ suites:
clone_repo: false
salttesting_namespec: salttesting==2017.6.1
- name: py3
excludes:
- centos-6
- ubuntu-14.04
provisioner:
pillars:
top.sls:

View File

@ -1,9 +1,10 @@
# This file is only used for running the test suite with kitchen-salt.
source "https://rubygems.org"
source 'https://rubygems.org'
gem "test-kitchen"
gem "kitchen-salt", :git => 'https://github.com/saltstack/kitchen-salt.git'
gem 'test-kitchen'
gem 'kitchen-salt', :git => 'https://github.com/saltstack/kitchen-salt.git'
gem 'kitchen-sync'
gem 'git'
group :docker do

View File

@ -404,6 +404,22 @@ The above example will force the minion to use the :py:mod:`systemd
.. __: https://github.com/saltstack/salt/issues/new
Logging Restrictions
--------------------
As a rule, logging should not be done anywhere in a Salt module before it is
loaded. This rule applies to all code that would run before the ``__virtual__()``
function, as well as the code within the ``__virtual__()`` function itself.
If logging statements are made before the virtual function determines if
the module should be loaded, then those logging statements will be called
repeatedly. This clutters up log files unnecessarily.
Exceptions may be considered for logging statements made at the ``trace`` level.
However, it is better to provide the necessary information by another means.
One method is to :ref:`return error information <modules-error-info>` in the
``__virtual__()`` function.
.. _modules-virtual-name:
``__virtualname__``

View File

@ -161,6 +161,8 @@ class Master(salt.utils.parsers.MasterOptionParser, DaemonsMixin): # pylint: di
v_dirs,
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir'], self.config['key_dir']],
)
# Clear out syndics from cachedir
@ -281,6 +283,8 @@ class Minion(salt.utils.parsers.MinionOptionParser, DaemonsMixin): # pylint: di
v_dirs,
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir']],
)
except OSError as error:
@ -468,6 +472,8 @@ class ProxyMinion(salt.utils.parsers.ProxyMinionOptionParser, DaemonsMixin): #
v_dirs,
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir']],
)
except OSError as error:
@ -576,6 +582,8 @@ class Syndic(salt.utils.parsers.SyndicOptionParser, DaemonsMixin): # pylint: di
],
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
root_dir=self.config['root_dir'],
sensitive_dirs=[self.config['pki_dir']],
)
except OSError as error:

View File

@ -32,7 +32,10 @@ class SPM(parsers.SPMParser):
v_dirs = [
self.config['cachedir'],
]
verify_env(v_dirs, self.config['user'],)
verify_env(v_dirs,
self.config['user'],
root_dir=self.config['root_dir'],
)
verify_log(self.config)
client = salt.spm.SPMClient(ui, self.config)
client.run(self.args)

View File

@ -1026,6 +1026,8 @@ class Single(object):
opts_pkg[u'__master_opts__'] = self.context[u'master_opts']
if u'_caller_cachedir' in self.opts:
opts_pkg[u'_caller_cachedir'] = self.opts[u'_caller_cachedir']
if u'known_hosts_file' in self.opts:
opts_pkg[u'known_hosts_file'] = self.opts[u'known_hosts_file']
else:
opts_pkg[u'_caller_cachedir'] = self.opts[u'cachedir']
# Use the ID defined in the roster file

View File

@ -67,7 +67,8 @@ class SaltCloud(salt.utils.parsers.SaltCloudParser):
if self.config['verify_env']:
verify_env(
[os.path.dirname(self.config['conf_file'])],
salt_master_user
salt_master_user,
root_dir=self.config['root_dir'],
)
logfile = self.config['log_file']
if logfile is not None and not logfile.startswith('tcp://') \

View File

@ -80,6 +80,7 @@ def _master_opts(cfg='master'):
cfg = os.environ.get(
'SALT_MASTER_CONFIG', os.path.join(default_dir, cfg))
opts = config.master_config(cfg)
opts['output'] = 'quiet'
return opts

View File

@ -42,6 +42,7 @@ import salt.utils.platform
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.versions
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.pillar import git_pillar
from salt.exceptions import FileserverConfigError, SaltMasterError
@ -534,7 +535,7 @@ class RemoteFuncs(object):
return ret
expr_form = load.get('expr_form')
if expr_form is not None and 'tgt_type' not in load:
salt.utils.warn_until(
salt.utils.versions.warn_until(
u'Neon',
u'_mine_get: minion {0} uses pre-Nitrogen API key '
u'"expr_form". Accepting for backwards compatibility '

View File

@ -13,6 +13,7 @@ import re
# Import Salt Libs
from salt.exceptions import CommandExecutionError
import salt.utils.path
import salt.utils.versions
log = logging.getLogger(__name__)
@ -635,8 +636,10 @@ def add_port(zone, port, permanent=True, force_masquerade=None):
# This will be deprecated in a future release
if force_masquerade is None:
force_masquerade = True
salt.utils.warn_until('Neon',
'add_port function will no longer force enable masquerading in future releases. Use add_masquerade to enable masquerading.')
salt.utils.versions.warn_until(
'Neon',
'add_port function will no longer force enable masquerading '
'in future releases. Use add_masquerade to enable masquerading.')
# (DEPRECATED) Force enable masquerading
# TODO: remove in future release
@ -709,8 +712,10 @@ def add_port_fwd(zone, src, dest, proto='tcp', dstaddr='', permanent=True, force
# This will be deprecated in a future release
if force_masquerade is None:
force_masquerade = True
salt.utils.warn_until('Neon',
'add_port_fwd function will no longer force enable masquerading in future releases. Use add_masquerade to enable masquerading.')
salt.utils.versions.warn_until(
'Neon',
'add_port_fwd function will no longer force enable masquerading '
'in future releases. Use add_masquerade to enable masquerading.')
# (DEPRECATED) Force enable masquerading
# TODO: remove in future release

View File

@ -6,9 +6,10 @@ Module for sending messages to Mattermost
:configuration: This module can be used by either passing an api_url and hook
directly or by specifying both in a configuration profile in the salt
master/minion config.
For example:
master/minion config. For example:
.. code-block:: yaml
mattermost:
hook: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
api_url: https://example.com
@ -35,6 +36,7 @@ __virtualname__ = 'mattermost'
def __virtual__():
'''
Return virtual name of the module.
:return: The virtual name of the module.
'''
return __virtualname__
@ -43,6 +45,7 @@ def __virtual__():
def _get_hook():
'''
Retrieves and return the Mattermost's configured hook
:return: String: the hook string
'''
hook = __salt__['config.get']('mattermost.hook') or \
@ -56,6 +59,7 @@ def _get_hook():
def _get_api_url():
'''
Retrieves and return the Mattermost's configured api url
:return: String: the api url string
'''
api_url = __salt__['config.get']('mattermost.api_url') or \
@ -69,6 +73,7 @@ def _get_api_url():
def _get_channel():
'''
Retrieves the Mattermost's configured channel
:return: String: the channel string
'''
channel = __salt__['config.get']('mattermost.channel') or \
@ -80,6 +85,7 @@ def _get_channel():
def _get_username():
'''
Retrieves the Mattermost's configured username
:return: String: the username string
'''
username = __salt__['config.get']('mattermost.username') or \
@ -95,14 +101,18 @@ def post_message(message,
hook=None):
'''
Send a message to a Mattermost channel.
:param channel: The channel name, either will work.
:param username: The username of the poster.
:param message: The message to send to the Mattermost channel.
:param api_url: The Mattermost api url, if not specified in the configuration.
:param hook: The Mattermost hook, if not specified in the configuration.
:return: Boolean if message was sent successfully.
CLI Example:
.. code-block:: bash
salt '*' mattermost.post_message message='Build is done'
'''
if not api_url:

View File

@ -1167,6 +1167,61 @@ def list_upgrades(bin_env=None,
return packages
def is_installed(pkgname=None,
                 bin_env=None,
                 user=None,
                 cwd=None):
    '''
    Filter list of installed apps from ``freeze`` and return True or False if
    ``pkgname`` exists in the list of packages installed.

    .. note::
        If the version of pip available is older than 8.0.3, the packages
        wheel, setuptools, and distribute will not be reported by this function
        even if they are installed. Unlike
        :py:func:`pip.freeze <salt.modules.pip.freeze>`, this function always
        reports the version of pip which is installed.

    pkgname
        The package name to look for. The comparison is case-insensitive,
        since pip/PyPI package names are compared in normalized (lowercase)
        form per PEP 503.

    bin_env
        Path to a pip binary or virtualenv to inspect.

    user
        The user under which to run pip.

    cwd
        Directory from which to run pip.

    CLI Example:

    .. code-block:: bash

        salt '*' pip.is_installed salt

    .. versionadded:: Oxygen
        The packages wheel, setuptools, and distribute are included if the
        installed pip is new enough.
    '''
    for line in freeze(bin_env=bin_env, user=user, cwd=cwd):
        if line.startswith('-f') or line.startswith('#'):
            # ignore -f line as it contains --find-links directory
            # ignore comment lines
            continue
        elif line.startswith('-e hg+not trust'):
            # ignore hg + not trust problem
            continue
        elif line.startswith('-e'):
            # editable install: "-e <url>#egg=<name>"
            line = line.split('-e ')[1]
            version_, name = line.split('#egg=')
        elif len(line.split('===')) >= 2:
            name = line.split('===')[0]
            version_ = line.split('===')[1]
        elif len(line.split('==')) >= 2:
            name = line.split('==')[0]
            version_ = line.split('==')[1]
        else:
            logger.error('Can\'t parse line \'{0}\''.format(line))
            continue

        if pkgname:
            # Compare case-insensitively: pip names are normalized to
            # lowercase (PEP 503), so 'Salt' must match 'salt'.
            if pkgname.lower() == name.lower():
                return True
    return False
def upgrade_available(pkg,
bin_env=None,
user=None,

View File

@ -771,7 +771,7 @@ def disable(**kwargs):
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule enable job failed.'
ret['comment'] = 'Event module not available. Schedule disable job failed.'
return ret

View File

@ -54,6 +54,7 @@ import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.hashutils
import salt.utils.path
import salt.utils.pkg
import salt.utils.platform
import salt.utils.versions
@ -646,33 +647,10 @@ def _get_repo_details(saltenv):
# Do some safety checks on the repo_path as its contents can be removed,
# this includes check for bad coding
system_root = os.environ.get('SystemRoot', r'C:\Windows')
deny_paths = (
r'[a-z]\:\\$', # C:\, D:\, etc
r'\\$', # \
re.escape(system_root) # C:\Windows
)
if not salt.utils.path.safe_path(
path=local_dest,
allow_path='\\'.join([system_root, 'TEMP'])):
# Since the above checks anything in C:\Windows, there are some
# directories we may want to make exceptions for
allow_paths = (
re.escape('\\'.join([system_root, 'TEMP'])), # C:\Windows\TEMP
)
# Check the local_dest to make sure it's not one of the bad paths
good_path = True
for d_path in deny_paths:
if re.match(d_path, local_dest, flags=re.IGNORECASE) is not None:
# Found deny path
good_path = False
# If local_dest is one of the bad paths, check for exceptions
if not good_path:
for a_path in allow_paths:
if re.match(a_path, local_dest, flags=re.IGNORECASE) is not None:
# Found exception
good_path = True
if not good_path:
raise CommandExecutionError(
'Attempting to delete files from a possibly unsafe location: '
'{0}'.format(local_dest)

View File

@ -6,8 +6,7 @@ or for problem solving if your minion is having problems.
.. versionadded:: 0.12.0
:depends: - pythoncom
- wmi
:depends: - wmi
'''
# Import Python Libs

View File

@ -509,7 +509,7 @@ def destroy(zpool, force=False):
'''
ret = {}
ret[zpool] = {}
if not exists(zpool):
if not __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool does not exist'
else:
zpool_cmd = _check_zpool()
@ -529,7 +529,7 @@ def destroy(zpool, force=False):
return ret
def scrub(zpool, stop=False):
def scrub(zpool, stop=False, pause=False):
'''
.. versionchanged:: 2016.3.0
@ -539,6 +539,13 @@ def scrub(zpool, stop=False):
name of storage pool
stop : boolean
if true, cancel ongoing scrub
pause : boolean
if true, pause ongoing scrub
.. versionadded:: Oxygen
.. note::
If both pause and stop are true, stop will win.
CLI Example:
@ -548,11 +555,18 @@ def scrub(zpool, stop=False):
'''
ret = {}
ret[zpool] = {}
if exists(zpool):
if __salt__['zpool.exists'](zpool):
zpool_cmd = _check_zpool()
cmd = '{zpool_cmd} scrub {stop}{zpool}'.format(
if stop:
action = '-s '
elif pause:
# NOTE: https://github.com/openzfs/openzfs/pull/407
action = '-p '
else:
action = ''
cmd = '{zpool_cmd} scrub {action}{zpool}'.format(
zpool_cmd=zpool_cmd,
stop='-s ' if stop else '',
action=action,
zpool=zpool
)
res = __salt__['cmd.run_all'](cmd, python_shell=False)
@ -567,7 +581,12 @@ def scrub(zpool, stop=False):
else:
ret[zpool]['error'] = res['stdout']
else:
ret[zpool]['scrubbing'] = True if not stop else False
if stop:
ret[zpool]['scrubbing'] = False
elif pause:
ret[zpool]['scrubbing'] = False
else:
ret[zpool]['scrubbing'] = True
else:
ret[zpool] = 'storage pool does not exist'
@ -595,6 +614,9 @@ def create(zpool, *vdevs, **kwargs):
additional pool properties
filesystem_properties : dict
additional filesystem properties
createboot : boolean
.. versionadded:: Oxygen
create a boot partition
CLI Example:
@ -629,7 +651,7 @@ def create(zpool, *vdevs, **kwargs):
ret = {}
# Check if the pool_name is already being used
if exists(zpool):
if __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool already exists'
return ret
@ -641,14 +663,21 @@ def create(zpool, *vdevs, **kwargs):
zpool_cmd = _check_zpool()
force = kwargs.get('force', False)
altroot = kwargs.get('altroot', None)
createboot = kwargs.get('createboot', False)
mountpoint = kwargs.get('mountpoint', None)
properties = kwargs.get('properties', None)
filesystem_properties = kwargs.get('filesystem_properties', None)
cmd = '{0} create'.format(zpool_cmd)
# bootsize implies createboot
if properties and 'bootsize' in properties:
createboot = True
# apply extra arguments from kwargs
if force: # force creation
cmd = '{0} -f'.format(cmd)
if createboot: # create boot paritition
cmd = '{0} -B'.format(cmd)
if properties: # create "-o property=value" pairs
optlist = []
for prop in properties:
@ -712,7 +741,7 @@ def add(zpool, *vdevs, **kwargs):
ret = {}
# check for pool
if not exists(zpool):
if not __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool does not exist'
return ret
@ -765,7 +794,7 @@ def attach(zpool, device, new_device, force=False):
dlist = []
# check for pool
if not exists(zpool):
if not __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool does not exist'
return ret
@ -827,7 +856,7 @@ def detach(zpool, device):
dlist = []
# check for pool
if not exists(zpool):
if not __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool does not exist'
return ret
@ -848,6 +877,95 @@ def detach(zpool, device):
return ret
def split(zpool, newzpool, **kwargs):
    '''
    .. versionadded:: Oxygen

    Splits devices off pool creating newpool.

    .. note::

        All vdevs in pool must be mirrors. At the time of the split,
        newpool will be a replica of pool.

    zpool : string
        name of storage pool

    newzpool : string
        name of new storage pool

    mountpoint : string
        sets the mount point for the root dataset

    altroot : string
        sets altroot for newzpool

    properties : dict
        additional pool properties for newzpool

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.split datamirror databackup
        salt '*' zpool.split datamirror databackup altroot=/backup

    .. note::

        Zpool properties can be specified at the time of creation of the pool by
        passing an additional argument called "properties" and specifying the properties
        with their respective values in the form of a python dictionary::

            properties="{'property1': 'value1', 'property2': 'value2'}"

        Example:

        .. code-block:: bash

            salt '*' zpool.split datamirror databackup properties="{'readonly': 'on'}"

    '''
    ret = {}

    # The target pool must not exist yet, and the source pool must exist
    if __salt__['zpool.exists'](newzpool):
        ret[newzpool] = 'storage pool already exists'
        return ret

    if not __salt__['zpool.exists'](zpool):
        ret[zpool] = 'storage pool does not exist'
        return ret

    zpool_cmd = _check_zpool()
    altroot = kwargs.get('altroot', None)
    properties = kwargs.get('properties', None)
    cmd = '{0} split'.format(zpool_cmd)

    # apply extra arguments from kwargs
    if properties:  # create "-o property=value" pairs
        optlist = []
        for prop in properties:
            if isinstance(properties[prop], bool):
                # zpool expects on/off, not True/False
                value = 'on' if properties[prop] else 'off'
            else:
                if ' ' in properties[prop]:
                    value = "'{0}'".format(properties[prop])
                else:
                    value = properties[prop]
            optlist.append('-o {0}={1}'.format(prop, value))
        opts = ' '.join(optlist)
        cmd = '{0} {1}'.format(cmd, opts)
    if altroot:  # set altroot
        cmd = '{0} -R {1}'.format(cmd, altroot)
    cmd = '{0} {1} {2}'.format(cmd, zpool, newzpool)

    # Create storage pool
    res = __salt__['cmd.run_all'](cmd, python_shell=False)

    # Check and see if the pool is available
    if res['retcode'] != 0:
        ret[newzpool] = res['stderr'] if 'stderr' in res else res['stdout']
    else:
        # NOTE: explicit positional index ({0}) for consistency with the
        # rest of this module and Python 2.6 compatibility
        ret[newzpool] = 'split off from {0}'.format(zpool)

    return ret
def replace(zpool, old_device, new_device=None, force=False):
'''
.. versionchanged:: 2016.3.0
@ -878,7 +996,7 @@ def replace(zpool, old_device, new_device=None, force=False):
'''
ret = {}
# Make sure pool is there
if not exists(zpool):
if not __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool does not exist'
return ret
@ -991,7 +1109,7 @@ def export(*pools, **kwargs):
return ret
for pool in pools:
if not exists(pool):
if not __salt__['zpool.exists'](pool):
ret[pool] = 'storage pool does not exist'
else:
pool_present.append(pool)
@ -1106,7 +1224,7 @@ def import_(zpool=None, new_name=None, **kwargs):
ret['error'] = res['stderr'] if 'stderr' in res else res['stdout']
else:
if zpool:
ret[zpool if not new_name else new_name] = 'imported' if exists(zpool if not new_name else new_name) else 'not found'
ret[zpool if not new_name else new_name] = 'imported' if __salt__['zpool.exists'](zpool if not new_name else new_name) else 'not found'
else:
ret = True
return ret
@ -1141,7 +1259,7 @@ def online(zpool, *vdevs, **kwargs):
dlist = []
# Check if the pool_name exists
if not exists(zpool):
if not __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool does not exist'
return ret
@ -1197,7 +1315,7 @@ def offline(zpool, *vdevs, **kwargs):
ret = {}
# Check if the pool_name exists
if not exists(zpool):
if not __salt__['zpool.exists'](zpool):
ret[zpool] = 'storage pool does not exist'
return ret
@ -1225,6 +1343,50 @@ def offline(zpool, *vdevs, **kwargs):
return ret
def labelclear(device, force=False):
    '''
    .. versionadded:: Oxygen

    Removes ZFS label information from the specified device

    .. warning::

        The device must not be part of an active pool configuration.

    device : string
        device

    force : boolean
        treat exported or foreign devices as inactive

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.labelclear /path/to/dev
    '''
    ret = {}

    zpool_cmd = _check_zpool()
    cmd = '{zpool_cmd} labelclear {force}{device}'.format(
        zpool_cmd=zpool_cmd,
        force='-f ' if force else '',
        device=device,
    )
    # Clear the label information from the device
    res = __salt__['cmd.run_all'](cmd, python_shell=False)
    if res['retcode'] != 0:
        ## NOTE: drop the leading "use '-f' hint" line from stderr
        stderr_lines = res['stderr'].split("\n")
        if stderr_lines and stderr_lines[0].startswith("use '-f'"):
            stderr_lines = stderr_lines[1:]
        res['stderr'] = "\n".join(stderr_lines)
        if res.get('stderr'):
            ret[device] = res['stderr']
        else:
            ret[device] = res['stdout']
    else:
        ret[device] = 'cleared'

    return ret
def reguid(zpool):
'''
.. versionadded:: 2016.3.0

View File

@ -310,25 +310,25 @@ class PillarCache(object):
return fresh_pillar.compile_pillar()
def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs
log.debug('Scanning pillar cache for information about minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv))
log.debug('Scanning pillar cache for information about minion {0} and pillarenv {1}'.format(self.minion_id, self.pillarenv))
log.debug('Scanning cache: {0}'.format(self.cache._dict))
# Check the cache!
if self.minion_id in self.cache: # Keyed by minion_id
# TODO Compare grains, etc?
if self.saltenv in self.cache[self.minion_id]:
if self.pillarenv in self.cache[self.minion_id]:
# We have a cache hit! Send it back.
log.debug('Pillar cache hit for minion {0} and saltenv {1}'.format(self.minion_id, self.saltenv))
return self.cache[self.minion_id][self.saltenv]
log.debug('Pillar cache hit for minion {0} and pillarenv {1}'.format(self.minion_id, self.pillarenv))
return self.cache[self.minion_id][self.pillarenv]
else:
# We found the minion but not the env. Store it.
fresh_pillar = self.fetch_pillar()
self.cache[self.minion_id][self.saltenv] = fresh_pillar
log.debug('Pillar cache miss for saltenv {0} for minion {1}'.format(self.saltenv, self.minion_id))
self.cache[self.minion_id][self.pillarenv] = fresh_pillar
log.debug('Pillar cache miss for pillarenv {0} for minion {1}'.format(self.pillarenv, self.minion_id))
return fresh_pillar
else:
# We haven't seen this minion yet in the cache. Store it.
fresh_pillar = self.fetch_pillar()
self.cache[self.minion_id] = {self.saltenv: fresh_pillar}
self.cache[self.minion_id] = {self.pillarenv: fresh_pillar}
log.debug('Pillar cache miss for minion {0}'.format(self.minion_id))
log.debug('Current pillar cache: {0}'.format(self.cache._dict)) # FIXME hack!
return fresh_pillar

View File

@ -6,8 +6,11 @@ from __future__ import absolute_import
# Import python libs
import os
import logging
import pickle
import logging
# Import Salt modules
import salt.utils.files
# Import Salt libs
import salt.utils.files
@ -22,7 +25,7 @@ DETAILS = {}
DETAILS['services'] = {'apache': 'running', 'ntp': 'running', 'samba': 'stopped'}
DETAILS['packages'] = {'coreutils': '1.0', 'apache': '2.4', 'tinc': '1.4', 'redbull': '999.99'}
FILENAME = os.tmpnam()
FILENAME = salt.utils.files.mkstemp()
# Want logging!
log = logging.getLogger(__file__)

View File

@ -196,9 +196,7 @@ def __virtual__():
Only return if all the modules are available
'''
if not salt.utils.path.which('racadm'):
log.critical('fx2 proxy minion needs "racadm" to be installed.')
return False
return False, 'fx2 proxy minion needs "racadm" to be installed.'
return True

View File

@ -16,9 +16,21 @@ Dependencies
The ``napalm`` proxy module requires NAPALM_ library to be installed: ``pip install napalm``
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
.. _NAPALM: https://napalm-automation.net/
.. _Installation: http://napalm.readthedocs.io/en/latest/installation/index.html
.. note::
Beginning with Salt release 2017.7.3, it is recommended to use
``napalm`` >= ``2.0.0``. The library has been unified into a monolithic
package, as in opposite to separate packages per driver. For more details
you can check `this document <https://napalm-automation.net/reunification/>`_.
While it will still work with the old packages, bear in mind that the NAPALM
core team will maintain only the main ``napalm`` package.
Moreover, for additional capabilities, the users can always define a
library that extends NAPALM's base capabilities and configure the
``provider`` option (see below).
Pillar
------
@ -59,7 +71,7 @@ always_alive: ``True``
.. versionadded:: 2017.7.0
provider: ``napalm_base``
The module that provides the ``get_network_device`` function.
The library that provides the ``get_network_device`` function.
This option is useful when the user has more specific needs and requires
to extend the NAPALM capabilities using a private library implementation.
The only constraint is that the alternative library needs to have the
@ -129,17 +141,7 @@ from __future__ import absolute_import
import logging
log = logging.getLogger(__file__)
# Import third party lib
try:
# will try to import NAPALM
# https://github.com/napalm-automation/napalm
# pylint: disable=W0611
import napalm_base
# pylint: enable=W0611
HAS_NAPALM = True
except ImportError:
HAS_NAPALM = False
# Import Salt modules
from salt.ext import six
import salt.utils.napalm
@ -163,7 +165,7 @@ DETAILS = {}
def __virtual__():
return HAS_NAPALM or (False, 'Please install the NAPALM library: `pip install napalm`!')
return salt.utils.napalm.virtual(__opts__, 'napalm', __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported

View File

@ -75,6 +75,7 @@ from __future__ import unicode_literals
# Import salt lib
import salt.output
import salt.utils.network
from salt.ext import six
from salt.ext.six.moves import map
@ -812,7 +813,25 @@ def find(addr, best=True, display=_DEFAULT_DISPLAY):
ip = '' # pylint: disable=invalid-name
ipnet = None
results = {}
results = {
'int_net': [],
'int_descr': [],
'int_name': [],
'int_ip': [],
'int_mac': [],
'int_device': [],
'lldp_descr': [],
'lldp_int': [],
'lldp_device': [],
'lldp_mac': [],
'lldp_device_int': [],
'mac_device': [],
'mac_int': [],
'arp_device': [],
'arp_int': [],
'arp_mac': [],
'arp_ip': []
}
if isinstance(addr, int):
results['mac'] = findmac(vlan=addr, display=display)
@ -826,6 +845,8 @@ def find(addr, best=True, display=_DEFAULT_DISPLAY):
except IndexError:
# no problem, let's keep searching
pass
if salt.utils.network.is_ipv6(addr):
mac = False
if not mac:
try:
ip = napalm_helpers.convert(napalm_helpers.ip, addr) # pylint: disable=invalid-name

View File

@ -13,6 +13,7 @@ import os
import re
import shlex
import stat
import string
import tarfile
from contextlib import closing
@ -771,12 +772,24 @@ def extracted(name,
return ret
urlparsed_source = _urlparse(source_match)
source_hash_basename = urlparsed_source.path or urlparsed_source.netloc
urlparsed_scheme = urlparsed_source.scheme
urlparsed_path = os.path.join(
urlparsed_source.netloc,
urlparsed_source.path).rstrip(os.sep)
source_is_local = urlparsed_source.scheme in salt.utils.files.LOCAL_PROTOS
# urlparsed_scheme will be the drive letter if this is a Windows file path
# This checks for a drive letter as the scheme and changes it to file
if urlparsed_scheme and \
urlparsed_scheme.lower() in string.ascii_lowercase:
urlparsed_path = ':'.join([urlparsed_scheme, urlparsed_path])
urlparsed_scheme = 'file'
source_hash_basename = urlparsed_path or urlparsed_source.netloc
source_is_local = urlparsed_scheme in salt.utils.files.LOCAL_PROTOS
if source_is_local:
# Get rid of "file://" from start of source_match
source_match = os.path.realpath(os.path.expanduser(urlparsed_source.path))
source_match = os.path.realpath(os.path.expanduser(urlparsed_path))
if not os.path.isfile(source_match):
ret['comment'] = 'Source file \'{0}\' does not exist'.format(
salt.utils.url.redact_http_basic_auth(source_match))

View File

@ -1498,13 +1498,8 @@ def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
'''
log.debug('Called state to accept VPC peering connection')
pending = __salt__['boto_vpc.is_peering_connection_pending'](
conn_id=conn_id,
conn_name=conn_name,
region=region,
key=key,
keyid=keyid,
profile=profile
)
conn_id=conn_id, conn_name=conn_name, region=region, key=key,
keyid=keyid, profile=profile)
ret = {
'name': name,
@ -1515,30 +1510,25 @@ def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
if not pending:
ret['result'] = True
ret['changes'].update({
'old': 'No pending VPC peering connection found. '
'Nothing to be done.'
})
ret['changes'].update({'old':
'No pending VPC peering connection found. Nothing to be done.'})
return ret
if __opts__['test']:
ret['changes'].update({'old': 'Pending VPC peering connection found '
'and can be accepted'})
ret['changes'].update({'old':
'Pending VPC peering connection found and can be accepted'})
return ret
log.debug('Calling module to accept this VPC peering connection')
result = __salt__['boto_vpc.accept_vpc_peering_connection'](
conn_id=conn_id, name=conn_name, region=region, key=key,
fun = 'boto_vpc.accept_vpc_peering_connection'
log.debug('Calling `{0}()` to accept this VPC peering connection'.format(fun))
result = __salt__[fun](conn_id=conn_id, name=conn_name, region=region, key=key,
keyid=keyid, profile=profile)
if 'error' in result:
ret['comment'] = "Failed to request VPC peering: {0}".format(result['error'])
ret['comment'] = "Failed to accept VPC peering: {0}".format(result['error'])
ret['result'] = False
return ret
ret['changes'].update({
'old': '',
'new': result['msg']
})
ret['changes'].update({'old': '', 'new': result['msg']})
return ret

View File

@ -82,8 +82,9 @@ import logging
# Import Salt Libs
from salt.exceptions import CommandExecutionError
import salt.utils.path
from salt.output import nested
import salt.utils.path
import salt.utils.versions
log = logging.getLogger(__name__)
@ -231,8 +232,10 @@ def present(name,
# if prune_services == None, set to True and log a deprecation warning
if prune_services is None:
prune_services = True
salt.utils.warn_until('Neon',
'The \'prune_services\' argument default is currently True, but will be changed to True in future releases.')
salt.utils.versions.warn_until(
'Neon',
'The \'prune_services\' argument default is currently True, '
'but will be changed to False in future releases.')
ret = _present(name, block_icmp, prune_block_icmp, default, masquerade, ports, prune_ports,
port_fwd, prune_port_fwd, services, prune_services, interfaces, prune_interfaces,

View File

@ -293,7 +293,7 @@ def absent(name, entry=None, entries=None, family='ipv4', **kwargs):
kwargs['set_name'],
family)
else:
command = __salt__['ipset.delete'](kwargs['set_name'], _entry, family, **kwargs)
command = __salt__['ipset.delete'](kwargs['set_name'], entry, family, **kwargs)
if 'Error' not in command:
ret['changes'] = {'locale': name}
ret['result'] = True

View File

@ -27,13 +27,14 @@ from jinja2.exceptions import TemplateRuntimeError
from jinja2.ext import Extension
# Import salt libs
from salt.exceptions import TemplateError
import salt.fileclient
import salt.utils.data
import salt.utils.files
import salt.utils.url
import salt.utils.yamldumper
from salt.utils.decorators.jinja import jinja_filter, jinja_test, jinja_global
from salt.utils.odict import OrderedDict
from salt.exceptions import TemplateError
log = logging.getLogger(__name__)
@ -44,18 +45,6 @@ __all__ = [
GLOBAL_UUID = uuid.UUID('91633EBF-1C86-5E33-935A-28061F4B480E')
# To dump OrderedDict objects as regular dicts. Used by the yaml
# template filter.
class OrderedDictDumper(yaml.Dumper): # pylint: disable=W0232
pass
yaml.add_representer(OrderedDict,
yaml.representer.SafeRepresenter.represent_dict,
Dumper=OrderedDictDumper)
class SaltCacheLoader(BaseLoader):
'''
@ -796,8 +785,8 @@ class SerializerExtension(Extension, object):
return Markup(json.dumps(value, sort_keys=sort_keys, indent=indent).strip())
def format_yaml(self, value, flow_style=True):
yaml_txt = yaml.dump(value, default_flow_style=flow_style,
Dumper=OrderedDictDumper).strip()
yaml_txt = salt.utils.yamldumper.safe_dump(
value, default_flow_style=flow_style).strip()
if yaml_txt.endswith('\n...'):
yaml_txt = yaml_txt[:len(yaml_txt)-4]
return Markup(yaml_txt)

View File

@ -14,6 +14,7 @@ Utils for the NAPALM modules and proxy.
.. versionadded:: 2017.7.0
'''
# Import Python libs
from __future__ import absolute_import
import traceback
@ -22,20 +23,31 @@ import importlib
from functools import wraps
# Import Salt libs
from salt.ext import six as six
import salt.output
import salt.utils.platform
# Import 3rd-party libs
from salt.ext import six
# Import third party libs
try:
# will try to import NAPALM
# https://github.com/napalm-automation/napalm
# pylint: disable=W0611
import napalm_base
import napalm
import napalm.base as napalm_base
# pylint: enable=W0611
HAS_NAPALM = True
HAS_NAPALM_BASE = False # doesn't matter anymore, but needed for the logic below
try:
NAPALM_MAJOR = int(napalm.__version__.split('.')[0])
except AttributeError:
NAPALM_MAJOR = 0
except ImportError:
HAS_NAPALM = False
try:
import napalm_base
HAS_NAPALM_BASE = True
except ImportError:
HAS_NAPALM_BASE = False
try:
# try importing ConnectionClosedException
@ -81,7 +93,7 @@ def virtual(opts, virtualname, filename):
'''
Returns the __virtual__.
'''
if HAS_NAPALM and (is_proxy(opts) or is_minion(opts)):
if ((HAS_NAPALM and NAPALM_MAJOR >= 2) or HAS_NAPALM_BASE) and (is_proxy(opts) or is_minion(opts)):
return virtualname
else:
return (

View File

@ -344,3 +344,60 @@ def sanitize_win_path(winpath):
elif isinstance(winpath, six.text_type):
winpath = winpath.translate(dict((ord(c), u'_') for c in intab))
return winpath
def safe_path(path, allow_path=None):
    r'''
    .. versionadded:: 2017.7.3

    Checks that the path is safe for modification by Salt. For example, you
    wouldn't want to have salt delete the contents of ``C:\Windows``. The
    following directories are considered unsafe:

    - C:\, D:\, E:\, etc.
    - \
    - C:\Windows

    Args:
        path (str): The path to check

        allow_path (str, list): A directory or list of directories inside of
            path that may be safe. For example: ``C:\Windows\TEMP``

    Returns:
        bool: True if safe, otherwise False
    '''
    # Create regex definitions for directories that may be unsafe to modify.
    # Matching is anchored at the start (re.match) and case-insensitive.
    system_root = os.environ.get('SystemRoot', 'C:\\Windows')
    deny_paths = (
        r'[a-z]\:\\$',  # C:\, D:\, etc
        r'\\$',  # \
        re.escape(system_root)  # C:\Windows
    )

    # Make allow_path a list
    if allow_path and not isinstance(allow_path, list):
        allow_path = [allow_path]

    # Create regex definitions for directories we may want to make
    # exceptions for
    allow_paths = list()
    if allow_path:
        for item in allow_path:
            allow_paths.append(re.escape(item))

    # Check the path to make sure it's not one of the bad paths
    good_path = True
    for d_path in deny_paths:
        if re.match(d_path, path, flags=re.IGNORECASE) is not None:
            # Found deny path
            good_path = False
            break  # one match is enough; no need to test the rest

    # If the path is denied, check the explicit exceptions
    if not good_path:
        for a_path in allow_paths:
            if re.match(a_path, path, flags=re.IGNORECASE) is not None:
                # Found exception
                good_path = True
                break

    return good_path

View File

@ -104,11 +104,12 @@ def dict_search_and_replace(d, old, new, expanded):
def find_value_to_expand(x, v):
a = x
for i in v[2:-1].split(':'):
if a is None:
return v
if i in a:
a = a.get(i)
else:
a = v
return a
return v
return a

View File

@ -30,9 +30,12 @@ import salt.defaults.exitcodes
import salt.utils.files
import salt.utils.platform
import salt.utils.user
import salt.utils.versions
log = logging.getLogger(__name__)
ROOT_DIR = 'c:\\salt' if salt.utils.platform.is_windows() else '/'
def zmq_version():
'''
@ -194,13 +197,34 @@ def verify_files(files, user):
return True
def verify_env(dirs, user, permissive=False, sensitive_dirs=None, skip_extra=False):
def verify_env(
dirs,
user,
permissive=False,
pki_dir='',
skip_extra=False,
root_dir=ROOT_DIR,
sensitive_dirs=None):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if pki_dir:
salt.utils.versions.warn_until(
'Neon',
'Use of \'pki_dir\' was detected: \'pki_dir\' has been deprecated '
'in favor of \'sensitive_dirs\'. Support for \'pki_dir\' will be '
'removed in Salt Neon.'
)
sensitive_dirs = sensitive_dirs or []
sensitive_dirs.append(list(pki_dir))
if salt.utils.platform.is_windows():
return win_verify_env(dirs, permissive, sensitive_dirs, skip_extra)
return win_verify_env(root_dir,
dirs,
permissive=permissive,
skip_extra=skip_extra,
sensitive_dirs=sensitive_dirs)
import pwd # after confirming not running Windows
try:
pwnam = pwd.getpwnam(user)
@ -526,18 +550,37 @@ def verify_log(opts):
log.warning('Insecure logging configuration detected! Sensitive data may be logged.')
def win_verify_env(dirs, permissive=False, sensitive_dirs=None, skip_extra=False):
def win_verify_env(
path,
dirs,
permissive=False,
pki_dir='',
skip_extra=False,
sensitive_dirs=None):
'''
Verify that the named directories are in place and that the environment
can shake the salt
'''
if pki_dir:
salt.utils.versions.warn_until(
'Neon',
'Use of \'pki_dir\' was detected: \'pki_dir\' has been deprecated '
'in favor of \'sensitive_dirs\'. Support for \'pki_dir\' will be '
'removed in Salt Neon.'
)
sensitive_dirs = sensitive_dirs or []
sensitive_dirs.append(list(pki_dir))
import salt.utils.win_functions
import salt.utils.win_dacl
import salt.utils.path
# Get the root path directory where salt is installed
path = dirs[0]
while os.path.basename(path) not in ['salt', 'salt-tests-tmpdir']:
path, base = os.path.split(path)
# Make sure the file_roots is not set to something unsafe since permissions
# on that directory are reset
if not salt.utils.path.safe_path(path=path):
raise CommandExecutionError(
'`file_roots` set to a possibly unsafe location: {0}'.format(path)
)
# Create the root path directory if missing
if not os.path.isdir(path):

View File

@ -1133,9 +1133,14 @@ def get_name(principal):
try:
return win32security.LookupAccountSid(None, sid_obj)[0]
except TypeError:
raise CommandExecutionError(
'Could not find User for {0}'.format(principal))
except (pywintypes.error, TypeError) as exc:
if type(exc) == pywintypes.error:
win_error = win32api.FormatMessage(exc.winerror).rstrip('\n')
message = 'Error resolving {0} ({1})'.format(principal, win_error)
else:
message = 'Error resolving {0}'.format(principal)
raise CommandExecutionError(message)
def get_owner(obj_name):
@ -1173,7 +1178,7 @@ def get_owner(obj_name):
owner_sid = 'S-1-1-0'
else:
raise CommandExecutionError(
'Failed to set permissions: {0}'.format(exc.strerror))
'Failed to get owner: {0}'.format(exc.strerror))
return get_name(win32security.ConvertSidToStringSid(owner_sid))

View File

@ -986,7 +986,9 @@ class TestDaemon(object):
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
TMP,
],
RUNTIME_VARS.RUNNING_TESTS_USER)
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts['root_dir'],
)
cls.master_opts = master_opts
cls.minion_opts = minion_opts

View File

@ -0,0 +1,4 @@
test_file:
file.managed:
- name: /tmp/nonbase_env
- source: salt://nonbase_env

View File

@ -0,0 +1 @@
it worked - new environment!

View File

@ -9,9 +9,11 @@ pillars:
default:
network:
dns:
{% if __grains__['os'] == 'should_never_match' %}
srv1: 192.168.0.1
srv2: 192.168.0.2
domain: example.com
{% endif %}
ntp:
srv1: 192.168.10.10
srv2: 192.168.10.20

View File

@ -1,6 +1,6 @@
environment: base
classes:
{% for class in ['default'] %}
{% for class in ['default', 'roles.app'] %}
- {{ class }}
{% endfor %}

View File

@ -186,7 +186,7 @@ class SaltUtilSyncPillarTest(ModuleCase):
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [1])
self.run_function('test.sleep', [5])
post_pillar = self.run_function('pillar.raw')
self.assertIn(pillar_key, post_pillar.get(pillar_key, 'didnotwork'))

View File

@ -1618,3 +1618,23 @@ class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
    '''
    test state.sls with saltenv using a nonbase environment
    with a salt source
    '''
    # Apply an SLS from the 'prod' saltenv; the state pulls its source
    # (salt://nonbase_env) from that environment's file roots.
    state_run = self.run_function(
        'state.sls',
        mods='non-base-env',
        saltenv='prod'
    )
    state_id = 'file_|-test_file_|-/tmp/nonbase_env_|-managed'
    # The managed file must report an update and succeed...
    self.assertEqual(state_run[state_id]['comment'], 'File /tmp/nonbase_env updated')
    self.assertTrue(state_run['file_|-test_file_|-/tmp/nonbase_env_|-managed']['result'])
    # ...and actually exist on disk afterwards.
    self.assertTrue(os.path.isfile('/tmp/nonbase_env'))
def tearDown(self):
    # Remove the file laid down by test_state_nonbase_environment so
    # repeated test runs start from a clean slate.
    nonbase_file = '/tmp/nonbase_env'
    if os.path.isfile(nonbase_file):
        os.remove(nonbase_file)

View File

@ -5,6 +5,7 @@ from __future__ import absolute_import
import os
import shutil
import tempfile
import textwrap
# Import Salt Testing libs
from tests.support.case import ShellCase
@ -57,6 +58,36 @@ class KeyTest(ShellCase, ShellCaseCommonTestsMixin):
if USERA in user:
self.run_call('user.delete {0} remove=True'.format(USERA))
def test_remove_key(self):
    '''
    test salt-key -d usage
    '''
    min_name = 'minibar'
    pki_dir = self.master_opts['pki_dir']
    key = os.path.join(pki_dir, 'minions', min_name)

    # Plant a syntactically valid public key directly in the accepted
    # minions directory so salt-key has a key to list and then delete.
    with salt.utils.files.fopen(key, 'w') as fp:
        fp.write(textwrap.dedent('''\
             -----BEGIN PUBLIC KEY-----
             MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoqIZDtcQtqUNs0wC7qQz
             JwFhXAVNT5C8M8zhI+pFtF/63KoN5k1WwAqP2j3LquTG68WpxcBwLtKfd7FVA/Kr
             OF3kXDWFnDi+HDchW2lJObgfzLckWNRFaF8SBvFM2dys3CGSgCV0S/qxnRAjrJQb
             B3uQwtZ64ncJAlkYpArv3GwsfRJ5UUQnYPDEJwGzMskZ0pHd60WwM1gMlfYmNX5O
             RBEjybyNpYDzpda6e6Ypsn6ePGLkP/tuwUf+q9wpbRE3ZwqERC2XRPux+HX2rGP+
             mkzpmuHkyi2wV33A9pDfMgRHdln2CLX0KgfRGixUQhW1o+Kmfv2rq4sGwpCgLbTh
             NwIDAQAB
             -----END PUBLIC KEY-----
             '''))

    # The planted key shows up under 'Accepted Keys' before removal...
    check_key = self.run_key('-p {0}'.format(min_name))
    self.assertIn('Accepted Keys:', check_key)
    self.assertIn('minibar: -----BEGIN PUBLIC KEY-----', check_key)

    # ...and printing it after 'salt-key -d <name> -y' yields nothing.
    remove_key = self.run_key('-d {0} -y'.format(min_name))

    check_key = self.run_key('-p {0}'.format(min_name))
    self.assertEqual([], check_key)
def test_list_accepted_args(self):
'''
test salt-key -l for accepted arguments

View File

@ -4,6 +4,8 @@ salt-ssh testing
'''
# Import Python libs
from __future__ import absolute_import
import os
import shutil
# Import salt testing libs
from tests.support.case import SSHCase
@ -19,3 +21,21 @@ class SSHTest(SSHCase):
'''
ret = self.run_function('test.ping')
self.assertTrue(ret, 'Ping did not return true')
def test_thin_dir(self):
    '''
    test to make sure thin_dir is created
    and salt-call file is included
    '''
    thin_dir = self.run_function('config.get', ['thin_dir'], wipe=False)
    # The original code called os.path.isdir/os.path.exists and threw the
    # boolean results away, so this test could never fail. Assert that the
    # deployed thin directory and its expected contents really exist.
    self.assertTrue(os.path.isdir(thin_dir))
    self.assertTrue(os.path.exists(os.path.join(thin_dir, 'salt-call')))
    self.assertTrue(os.path.exists(os.path.join(thin_dir, 'running_data')))
def tearDown(self):
    '''
    make sure to clean up any old ssh directories
    '''
    # thin_dir is the directory salt-ssh deploys its runtime payload to;
    # remove it so subsequent tests exercise a fresh deployment.
    salt_dir = self.run_function('config.get', ['thin_dir'], wipe=False)
    if os.path.exists(salt_dir):
        shutil.rmtree(salt_dir)

View File

@ -7,8 +7,13 @@ import shutil
# Import Salt Testing Libs
from tests.support.case import SSHCase
from tests.support.paths import TMP
# Import Salt Libs
from salt.ext import six
SSH_SLS = 'ssh_state_tests'
SSH_SLS_FILE = '/tmp/test'
class SSHStateTest(SSHCase):
@ -37,6 +42,87 @@ class SSHStateTest(SSHCase):
check_file = self.run_function('file.file_exists', ['/tmp/test'])
self.assertTrue(check_file)
def test_state_show_sls(self):
    '''
    test state.show_sls with salt-ssh
    '''
    # show_sls should render the SLS data...
    ret = self.run_function('state.show_sls', [SSH_SLS])
    self._check_dict_ret(ret=ret, val='__sls__', exp_ret=SSH_SLS)

    # ...without actually applying the state (no file created).
    check_file = self.run_function('file.file_exists', [SSH_SLS_FILE], wipe=False)
    self.assertFalse(check_file)
def test_state_show_top(self):
    '''
    test state.show_top with salt-ssh
    '''
    ret = self.run_function('state.show_top')
    # Expect exactly the base-environment states defined in the test
    # suite's file roots.
    self.assertEqual(ret, {u'base': [u'master_tops_test', u'core']})
def test_state_single(self):
    '''
    state.single with salt-ssh
    '''
    # Expected fields of the single-state return.
    ret_out = {'name': 'itworked',
               'result': True,
               'comment': 'Success!'}

    single = self.run_function('state.single',
                               ['test.succeed_with_changes name=itworked'])

    # The return is keyed by state id; check each entry's fields.
    for key, value in six.iteritems(single):
        self.assertEqual(value['name'], ret_out['name'])
        self.assertEqual(value['result'], ret_out['result'])
        self.assertEqual(value['comment'], ret_out['comment'])
def test_show_highstate(self):
    '''
    state.show_highstate with salt-ssh
    '''
    high = self.run_function('state.show_highstate')
    destpath = os.path.join(TMP, 'testfile')
    self.assertTrue(isinstance(high, dict))
    # The testfile state from the base environment's top file must be
    # present and attributed to the 'base' env.
    self.assertTrue(destpath in high)
    self.assertEqual(high[destpath]['__env__'], 'base')
def test_state_high(self):
    '''
    state.high with salt-ssh
    '''
    # Expected fields of the high-data execution return.
    ret_out = {'name': 'itworked',
               'result': True,
               'comment': 'Success!'}

    # Pass raw high data on the command line.
    high = self.run_function('state.high', ['"{"itworked": {"test": ["succeed_with_changes"]}}"'])

    for key, value in six.iteritems(high):
        self.assertEqual(value['name'], ret_out['name'])
        self.assertEqual(value['result'], ret_out['result'])
        self.assertEqual(value['comment'], ret_out['comment'])
def test_show_lowstate(self):
    '''
    state.show_lowstate with salt-ssh
    '''
    low = self.run_function('state.show_lowstate')
    # Lowstate is a list of low-data chunks, each chunk a dict.
    self.assertTrue(isinstance(low, list))
    self.assertTrue(isinstance(low[0], dict))
def test_state_low(self):
    '''
    state.low with salt-ssh
    '''
    # Expected fields of the low-chunk execution return.
    ret_out = {'name': 'itworked',
               'result': True,
               'comment': 'Success!'}

    # Pass a single raw low-data chunk on the command line.
    low = self.run_function('state.low', ['"{"state": "test", "fun": "succeed_with_changes", "name": "itworked"}"'])

    for key, value in six.iteritems(low):
        self.assertEqual(value['name'], ret_out['name'])
        self.assertEqual(value['result'], ret_out['result'])
        self.assertEqual(value['comment'], ret_out['comment'])
def test_state_request_check_clear(self):
'''
test state.request system with salt-ssh
@ -60,7 +146,7 @@ class SSHStateTest(SSHCase):
run = self.run_function('state.run_request', wipe=False)
check_file = self.run_function('file.file_exists', ['/tmp/test'], wipe=False)
check_file = self.run_function('file.file_exists', [SSH_SLS_FILE], wipe=False)
self.assertTrue(check_file)
def tearDown(self):
@ -70,3 +156,6 @@ class SSHStateTest(SSHCase):
salt_dir = self.run_function('config.get', ['thin_dir'], wipe=False)
if os.path.exists(salt_dir):
shutil.rmtree(salt_dir)
if os.path.exists(SSH_SLS_FILE):
os.remove(SSH_SLS_FILE)

View File

@ -7,7 +7,6 @@ from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
from tests.support.mixins import SaltReturnAssertsMixin
@ -15,32 +14,58 @@ from tests.support.mixins import SaltReturnAssertsMixin
import salt.utils.path
INIT_DELAY = 5
SERVICE_NAME = 'crond'
@destructiveTest
@skipIf(salt.utils.path.which('crond') is None, 'crond not installed')
class ServiceTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the service state
'''
def setUp(self):
self.service_name = 'cron'
cmd_name = 'crontab'
os_family = self.run_function('grains.get', ['os_family'])
if os_family == 'RedHat':
self.service_name = 'crond'
elif os_family == 'Arch':
self.service_name = 'systemd-journald'
cmd_name = 'systemctl'
if salt.utils.path.which(cmd_name) is None:
self.skipTest('{0} is not installed'.format(cmd_name))
def check_service_status(self, exp_return):
'''
helper method to check status of service
'''
check_status = self.run_function('service.status', name=SERVICE_NAME)
check_status = self.run_function('service.status',
name=self.service_name)
if check_status is not exp_return:
self.fail('status of service is not returning correctly')
def test_service_running(self):
'''
test service.running state module
'''
stop_service = self.run_function('service.stop', self.service_name)
self.assertTrue(stop_service)
self.check_service_status(False)
start_service = self.run_state('service.running',
name=self.service_name)
self.assertTrue(start_service)
self.check_service_status(True)
def test_service_dead(self):
'''
test service.dead state module
'''
start_service = self.run_state('service.running', name=SERVICE_NAME)
start_service = self.run_state('service.running',
name=self.service_name)
self.assertSaltTrueReturn(start_service)
self.check_service_status(True)
ret = self.run_state('service.dead', name=SERVICE_NAME)
ret = self.run_state('service.dead', name=self.service_name)
self.assertSaltTrueReturn(ret)
self.check_service_status(False)
@ -48,11 +73,12 @@ class ServiceTest(ModuleCase, SaltReturnAssertsMixin):
'''
test service.dead state module with init_delay arg
'''
start_service = self.run_state('service.running', name=SERVICE_NAME)
start_service = self.run_state('service.running',
name=self.service_name)
self.assertSaltTrueReturn(start_service)
self.check_service_status(True)
ret = self.run_state('service.dead', name=SERVICE_NAME,
ret = self.run_state('service.dead', name=self.service_name,
init_delay=INIT_DELAY)
self.assertSaltTrueReturn(ret)
self.check_service_status(False)

View File

@ -111,7 +111,9 @@ class AdaptedConfigurationTestCaseMixin(object):
rdict['sock_dir'],
conf_dir
],
RUNTIME_VARS.RUNNING_TESTS_USER)
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=rdict['root_dir'],
)
rdict['config_dir'] = conf_dir
rdict['conf_file'] = os.path.join(conf_dir, config_for)

View File

@ -1165,6 +1165,60 @@ class PipTestCase(TestCase, LoaderModuleMockMixin):
}
)
def test_is_installed_true(self):
    '''
    pip.is_installed returns True when the package appears in the
    mocked `pip freeze` output.
    '''
    eggs = [
        'M2Crypto==0.21.1',
        '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
        'bbfreeze==1.1.0',
        'bbfreeze-loader==1.1.0',
        'pycrypto==2.6'
    ]
    mock = MagicMock(
        return_value={
            'retcode': 0,
            'stdout': '\n'.join(eggs)
        }
    )
    with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
        with patch('salt.modules.pip.version',
                   MagicMock(return_value='6.1.1')):
            ret = pip.is_installed(pkgname='bbfreeze')
            # is_installed should only shell out to `pip freeze`.
            mock.assert_called_with(
                ['pip', 'freeze'],
                cwd=None,
                runas=None,
                python_shell=False,
                use_vt=False,
            )
            self.assertTrue(ret)
def test_is_installed_false(self):
    '''
    pip.is_installed returns False when the package is absent from the
    mocked `pip freeze` output.
    '''
    eggs = [
        'M2Crypto==0.21.1',
        '-e git+git@github.com:s0undt3ch/salt-testing.git@9ed81aa2f918d59d3706e56b18f0782d1ea43bf8#egg=SaltTesting-dev',
        'bbfreeze==1.1.0',
        'bbfreeze-loader==1.1.0',
        'pycrypto==2.6'
    ]
    mock = MagicMock(
        return_value={
            'retcode': 0,
            'stdout': '\n'.join(eggs)
        }
    )
    with patch.dict(pip.__salt__, {'cmd.run_all': mock}):
        with patch('salt.modules.pip.version',
                   MagicMock(return_value='6.1.1')):
            ret = pip.is_installed(pkgname='notexist')
            # is_installed should only shell out to `pip freeze`.
            mock.assert_called_with(
                ['pip', 'freeze'],
                cwd=None,
                runas=None,
                python_shell=False,
                use_vt=False,
            )
            self.assertFalse(ret)
def test_install_pre_argument_in_resulting_command(self):
pkg = 'pep8'
# Lower than 1.4 versions don't end-up with `--pre` in the resulting

View File

@ -21,6 +21,12 @@ from tests.support.mock import (
# Import Salt Libs
import salt.modules.win_dns_client as win_dns_client
try:
import wmi
HAS_WMI = True
except ImportError:
HAS_WMI = False
class Mockwmi(object):
'''
@ -59,6 +65,7 @@ class Mockwinapi(object):
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(not HAS_WMI, 'WMI only available on Windows')
class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_dns_client
@ -66,16 +73,13 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
# wmi and pythoncom modules are platform specific...
wmi = types.ModuleType('wmi')
pythoncom = types.ModuleType('pythoncom')
sys_modules_patcher = patch.dict('sys.modules', {'wmi': wmi, 'pythoncom': pythoncom})
mock_pythoncom = types.ModuleType('pythoncom')
sys_modules_patcher = patch.dict('sys.modules',
{'pythoncom': mock_pythoncom})
sys_modules_patcher.start()
self.addCleanup(sys_modules_patcher.stop)
self.WMI = Mock()
self.addCleanup(delattr, self, 'WMI')
wmi.WMI = Mock(return_value=self.WMI)
pythoncom.CoInitialize = Mock()
pythoncom.CoUninitialize = Mock()
return {win_dns_client: {'wmi': wmi}}
# 'get_dns_servers' function tests: 1
@ -90,7 +94,8 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]):
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertListEqual(win_dns_client.get_dns_servers
('Local Area Connection'),
['10.1.1.10'])
@ -113,23 +118,22 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
'''
Test if it add the DNS server to the network interface.
'''
with patch('salt.utils.winapi.Com', MagicMock()):
with patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]):
with patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]):
self.assertFalse(win_dns_client.add_dns('10.1.1.10',
'Ethernet'))
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertFalse(win_dns_client.add_dns('10.1.1.10', 'Ethernet'))
self.assertTrue(win_dns_client.add_dns
('10.1.1.10', 'Local Area Connection'))
self.assertTrue(win_dns_client.add_dns('10.1.1.10', 'Local Area Connection'))
with patch.object(win_dns_client, 'get_dns_servers',
MagicMock(return_value=['10.1.1.10'])):
with patch.dict(win_dns_client.__salt__,
{'cmd.retcode': MagicMock(return_value=0)}):
self.assertTrue(win_dns_client.add_dns('10.1.1.0',
'Local Area Connection'))
MagicMock(return_value=['10.1.1.10'])), \
patch.dict(win_dns_client.__salt__,
{'cmd.retcode': MagicMock(return_value=0)}), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertTrue(win_dns_client.add_dns('10.1.1.0', 'Local Area Connection'))
# 'dns_dhcp' function tests: 1
@ -148,9 +152,10 @@ class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
'''
Test if it get the type of DNS configuration (dhcp / static)
'''
with patch('salt.utils.winapi.Com', MagicMock()):
with patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]):
with patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]):
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertTrue(win_dns_client.get_dns_config())

View File

@ -5,7 +5,6 @@
# Import Python Libs
from __future__ import absolute_import
import types
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
@ -22,6 +21,12 @@ from tests.support.mock import (
import salt.utils.network
import salt.modules.win_network as win_network
try:
import wmi
HAS_WMI = True
except ImportError:
HAS_WMI = False
class Mockwmi(object):
'''
@ -64,12 +69,9 @@ class WinNetworkTestCase(TestCase, LoaderModuleMockMixin):
Test cases for salt.modules.win_network
'''
def setup_loader_modules(self):
# wmi modules are platform specific...
wmi = types.ModuleType('wmi')
self.WMI = Mock()
self.addCleanup(delattr, self, 'WMI')
wmi.WMI = Mock(return_value=self.WMI)
return {win_network: {'wmi': wmi}}
return {win_network: {}}
# 'ping' function tests: 1
@ -156,6 +158,7 @@ class WinNetworkTestCase(TestCase, LoaderModuleMockMixin):
# 'interfaces_names' function tests: 1
@skipIf(not HAS_WMI, "WMI only available on Windows")
def test_interfaces_names(self):
'''
Test if it return a list of all the interfaces names
@ -164,7 +167,8 @@ class WinNetworkTestCase(TestCase, LoaderModuleMockMixin):
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch('salt.utils', Mockwinapi):
patch('salt.utils', Mockwinapi), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertListEqual(win_network.interfaces_names(),
['Ethernet'])

View File

@ -3,7 +3,6 @@
# Import python libs
from __future__ import absolute_import
import sys
import types
# Import Salt libs
from salt.ext import six
@ -12,25 +11,16 @@ from salt.ext import six
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, patch, ANY
# wmi and pythoncom modules are platform specific...
wmi = types.ModuleType('wmi')
sys.modules['wmi'] = wmi
pythoncom = types.ModuleType('pythoncom')
sys.modules['pythoncom'] = pythoncom
if NO_MOCK is False:
WMI = Mock()
wmi.WMI = Mock(return_value=WMI)
pythoncom.CoInitialize = Mock()
pythoncom.CoUninitialize = Mock()
try:
import wmi
except ImportError:
pass
# This is imported late so mock can do its job
import salt.modules.win_status as status
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(sys.stdin.encoding != 'UTF-8', 'UTF-8 encoding required for this test is not supported')
@skipIf(status.HAS_WMI is False, 'This test requires Windows')
class TestProcsBase(TestCase):
def __init__(self, *args, **kwargs):
@ -55,7 +45,9 @@ class TestProcsBase(TestCase):
self.__processes.append(process)
def call_procs(self):
WMI = Mock()
WMI.win32_process = Mock(return_value=self.__processes)
with patch.object(wmi, 'WMI', Mock(return_value=WMI)):
self.result = status.procs()
@ -101,6 +93,7 @@ class TestProcsAttributes(TestProcsBase):
self.assertEqual(self.proc['user_domain'], self._expected_domain)
@skipIf(sys.stdin.encoding != 'UTF-8', 'UTF-8 encoding required for this test is not supported')
class TestProcsUnicodeAttributes(TestProcsBase):
def setUp(self):
unicode_str = u'\xc1'

View File

@ -13,6 +13,7 @@ from __future__ import absolute_import
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
@ -152,3 +153,171 @@ class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
ret = zpool.get('mypool', 'readonly')
res = OrderedDict([('mypool', OrderedDict([('readonly', 'off')]))])
self.assertEqual(res, ret)
def test_scrub_start(self):
    '''
    Tests start of scrub
    '''
    # Fake a successful `zpool scrub` command invocation.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = ""
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    mock_exists = MagicMock(return_value=True)
    with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
        with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
            # NOTE: `ret` is rebound here from the mocked command output
            # to the function's return value.
            ret = zpool.scrub('mypool')
            res = OrderedDict([('mypool', OrderedDict([('scrubbing', True)]))])
            self.assertEqual(res, ret)
def test_scrub_pause(self):
    '''
    Tests pause of scrub
    '''
    # Fake a successful `zpool scrub -p` command invocation.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = ""
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    mock_exists = MagicMock(return_value=True)
    with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
        with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
            # `ret` is rebound to the function's return value here.
            ret = zpool.scrub('mypool', pause=True)
            res = OrderedDict([('mypool', OrderedDict([('scrubbing', False)]))])
            self.assertEqual(res, ret)
def test_scrub_stop(self):
    '''
    Tests stop of scrub
    '''
    # Fake a successful `zpool scrub -s` command invocation.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = ""
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    mock_exists = MagicMock(return_value=True)
    with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
        with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
            # `ret` is rebound to the function's return value here.
            ret = zpool.scrub('mypool', stop=True)
            res = OrderedDict([('mypool', OrderedDict([('scrubbing', False)]))])
            self.assertEqual(res, ret)
def test_split_success(self):
    '''
    Tests split on success
    '''
    # Fake a successful `zpool split` command invocation.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = ""
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    # Two consecutive exists() answers — presumably the first checks the
    # new pool (must not exist) and the second the source pool (must
    # exist); verify against zpool.split's implementation.
    mock_exists = Mock()
    mock_exists.side_effect = [False, True]
    with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
        with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
            ret = zpool.split('datapool', 'backuppool')
            res = OrderedDict([('backuppool', 'split off from datapool')])
            self.assertEqual(res, ret)
def test_split_exist_new(self):
    '''
    Tests split on existing new pool
    '''
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = ""
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    # Both exists() checks return True: the target pool already exists,
    # so split should bail out before running any command.
    mock_exists = Mock()
    mock_exists.side_effect = [True, True]
    with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
        with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
            ret = zpool.split('datapool', 'backuppool')
            res = OrderedDict([('backuppool', 'storage pool already exists')])
            self.assertEqual(res, ret)
def test_split_missing_pool(self):
    '''
    Tests split on missing source pool
    '''
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = ""
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    # Neither pool exists: split should report the missing source pool.
    mock_exists = Mock()
    mock_exists.side_effect = [False, False]
    with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
        with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
            ret = zpool.split('datapool', 'backuppool')
            res = OrderedDict([('datapool', 'storage pool does not exists')])
            self.assertEqual(res, ret)
def test_split_not_mirror(self):
    '''
    Tests split on source pool is not a mirror
    '''
    # Fake a failing `zpool split`: the CLI rejects non-mirror sources.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = "Unable to split datapool: Source pool must be composed only of mirrors"
    ret['retcode'] = 1
    mock_cmd = MagicMock(return_value=ret)
    mock_exists = Mock()
    mock_exists.side_effect = [False, True]
    with patch.dict(zpool.__salt__, {'zpool.exists': mock_exists}):
        with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
            ret = zpool.split('datapool', 'backuppool')
            # The command's stderr should be surfaced verbatim.
            res = OrderedDict([('backuppool', 'Unable to split datapool: Source pool must be composed only of mirrors')])
            self.assertEqual(res, ret)
def test_labelclear_success(self):
    '''
    Tests labelclear on successful label removal
    '''
    # Fake a successful `zpool labelclear` command invocation.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = ""
    ret['retcode'] = 0
    mock_cmd = MagicMock(return_value=ret)
    with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
        ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False)
        res = OrderedDict([('/dev/rdsk/c0t0d0', 'cleared')])
        self.assertEqual(res, ret)
def test_labelclear_cleared(self):
    '''
    Tests labelclear on device with no label
    '''
    # Fake the CLI failure emitted when the device has no label to clear.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = "failed to read label from /dev/rdsk/c0t0d0"
    ret['retcode'] = 1
    mock_cmd = MagicMock(return_value=ret)
    with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
        ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False)
        # The command's stderr should be surfaced verbatim.
        res = OrderedDict([('/dev/rdsk/c0t0d0', 'failed to read label from /dev/rdsk/c0t0d0')])
        self.assertEqual(res, ret)
def test_labelclear_exported(self):
    '''
    Tests labelclear on a device from an exported pool
    '''
    # Fake the two-line CLI failure emitted without -f when the device
    # still belongs to an exported pool.
    ret = {}
    ret['stdout'] = ""
    ret['stderr'] = "\n".join([
        "use '-f' to override the following error:",
        '/dev/rdsk/c0t0d0 is a member of exported pool "mypool"',
    ])
    ret['retcode'] = 1
    mock_cmd = MagicMock(return_value=ret)
    with patch.dict(zpool.__salt__, {'cmd.run_all': mock_cmd}):
        ret = zpool.labelclear('/dev/rdsk/c0t0d0', force=False)
        # Only the substantive error line is expected in the result,
        # not the "use '-f'" hint.
        res = OrderedDict([('/dev/rdsk/c0t0d0', '/dev/rdsk/c0t0d0 is a member of exported pool "mypool"')])
        self.assertEqual(res, ret)

View File

@ -34,10 +34,17 @@ class SaltclassPillarTestCase(TestCase, LoaderModuleMockMixin):
}}
def _runner(self, expected_ret):
full_ret = {}
parsed_ret = []
try:
full_ret = saltclass.ext_pillar(fake_minion_id, fake_pillar, fake_args)
parsed_ret = full_ret['__saltclass__']['classes']
# Fail the test if we hit our NoneType error
except TypeError as err:
self.fail(err)
# Else give the parsed content result
self.assertListEqual(parsed_ret, expected_ret)
def test_succeeds(self):
ret = ['default.users', 'default.motd', 'default']
ret = ['default.users', 'default.motd', 'default', 'roles.app']
self._runner(ret)

View File

@ -20,6 +20,7 @@ from tests.support.mock import (
# Import Salt Libs
import salt.states.archive as archive
from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin
import salt.utils.platform
def _isfile_side_effect(path):
@ -33,10 +34,13 @@ def _isfile_side_effect(path):
'''
return {
'/tmp/foo.tar.gz': True,
'c:\\tmp\\foo.tar.gz': True,
'/tmp/out': False,
'\\tmp\\out': False,
'/usr/bin/tar': True,
'/bin/tar': True,
'/tmp/test_extracted_tar': False,
'c:\\tmp\\test_extracted_tar': False,
}[path]
@ -59,6 +63,10 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
archive.extracted tar options
'''
if salt.utils.platform.is_windows():
source = 'c:\\tmp\\foo.tar.gz'
tmp_dir = 'c:\\tmp\\test_extracted_tar'
else:
source = '/tmp/foo.tar.gz'
tmp_dir = '/tmp/test_extracted_tar'
test_tar_opts = [
@ -94,24 +102,23 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
with patch.dict(archive.__opts__, {'test': False,
'cachedir': tmp_dir,
'hash_type': 'sha256'}):
with patch.dict(archive.__salt__, {'file.directory_exists': mock_false,
'hash_type': 'sha256'}),\
patch.dict(archive.__salt__, {'file.directory_exists': mock_false,
'file.file_exists': mock_false,
'state.single': state_single_mock,
'file.makedirs': mock_true,
'cmd.run_all': mock_run,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
with patch.dict(archive.__states__, {'file.directory': mock_true}):
with patch.object(os.path, 'isfile', isfile_mock):
'file.source_list': mock_source_list}),\
patch.dict(archive.__states__, {'file.directory': mock_true}),\
patch.object(os.path, 'isfile', isfile_mock),\
patch('salt.utils.path.which', MagicMock(return_value=True)):
for test_opts, ret_opts in zip(test_tar_opts, ret_tar_opts):
ret = archive.extracted(tmp_dir,
source,
options=test_opts,
archive.extracted(tmp_dir, source, options=test_opts,
enforce_toplevel=False)
ret_opts.append(source)
mock_run.assert_called_with(ret_opts,
cwd=tmp_dir + os.sep,
mock_run.assert_called_with(ret_opts, cwd=tmp_dir + os.sep,
python_shell=False)
def test_tar_gnutar(self):
@ -142,10 +149,11 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
'file.makedirs': mock_true,
'cmd.run_all': run_all,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
with patch.dict(archive.__states__, {'file.directory': mock_true}):
with patch.object(os.path, 'isfile', isfile_mock):
ret = archive.extracted('/tmp/out',
'file.source_list': mock_source_list}),\
patch.dict(archive.__states__, {'file.directory': mock_true}),\
patch.object(os.path, 'isfile', isfile_mock),\
patch('salt.utils.path.which', MagicMock(return_value=True)):
ret = archive.extracted(os.path.join(os.sep + 'tmp', 'out'),
source,
options='xvzf',
enforce_toplevel=False,
@ -180,10 +188,11 @@ class ArchiveTestCase(TestCase, LoaderModuleMockMixin):
'file.makedirs': mock_true,
'cmd.run_all': run_all,
'archive.list': list_mock,
'file.source_list': mock_source_list}):
with patch.dict(archive.__states__, {'file.directory': mock_true}):
with patch.object(os.path, 'isfile', isfile_mock):
ret = archive.extracted('/tmp/out',
'file.source_list': mock_source_list}),\
patch.dict(archive.__states__, {'file.directory': mock_true}),\
patch.object(os.path, 'isfile', isfile_mock),\
patch('salt.utils.path.which', MagicMock(return_value=True)):
ret = archive.extracted(os.path.join(os.sep + 'tmp', 'out'),
source,
options='xvzf',
enforce_toplevel=False,

View File

@ -662,7 +662,6 @@ class TestCustomExtensions(TestCase):
# type of the rendered variable (should be unicode, which is the same as
# six.text_type). This should cover all use cases but also allow the test
# to pass on CentOS 6 running Python 2.7.
self.assertIn('!!python/unicode', rendered)
self.assertIn('str value', rendered)
self.assertIsInstance(rendered, six.text_type)

View File

@ -112,7 +112,7 @@ class TestVerify(TestCase):
root_dir = tempfile.mkdtemp(dir=TMP)
var_dir = os.path.join(root_dir, 'var', 'log', 'salt')
key_dir = os.path.join(root_dir, 'key_dir')
verify_env([var_dir, key_dir], getpass.getuser(), sensitive_dirs=[key_dir])
verify_env([var_dir, key_dir], getpass.getuser(), root_dir=root_dir, sensitive_dirs=[key_dir])
self.assertTrue(os.path.exists(var_dir))
self.assertTrue(os.path.exists(key_dir))