salt/tests/integration/__init__.py


# -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import json
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
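# Matches the "state function is already running" error message returned by the
# state system; the captured PID/JID let the test helpers wait for the
# conflicting run to finish and retry instead of failing outright.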
STATE_FUNCTION_RUNNING_RE = re.compile(
r'''The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID '''
r'(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)'
)
INTEGRATION_TEST_DIR = os.path.dirname(
os.path.normpath(os.path.abspath(__file__))
)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.case import ShellTestCase
from salttesting.mixins import CheckShellBinaryNameAndVersionMixIn
from salttesting.parser import PNUM, print_header, SaltTestcaseParser
from salttesting.helpers import requires_sshd_server
from salttesting.helpers import ensure_in_syspath, RedirectStdStreams
# Update sys.path
ensure_in_syspath(CODE_DIR)
# Import Salt libs
import salt
import salt.config
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils
import salt.utils.process
import salt.log.setup as salt_log_setup
from salt.ext import six
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.utils.nb_popen import NonBlockingPopen
from salt.exceptions import SaltClientError
try:
from shlex import quote as _quote # pylint: disable=E0611
except ImportError:
from pipes import quote as _quote
try:
import salt.master
except ImportError:
# Not required for raet tests
pass
# Import 3rd-party libs
import yaml
import msgpack
import salt.ext.six as six
from salt.ext.six.moves import cStringIO
try:
import salt.ext.six.moves.socketserver as socketserver
except ImportError:
import socketserver
if salt.utils.is_windows():
import win32api
from tornado import gen
from tornado import ioloop
try:
from salttesting.helpers import terminate_process_pid
except ImportError:
# Once the latest salt-testing works against salt's develop branch
# uncomment the following 2 lines and delete the function defined
# in this except
#print('Please upgrade your version of salt-testing')
#sys.exit(1)
import psutil
def terminate_process_pid(pid, only_children=False):
children = []
process = None
# Let's begin the shutdown routines
try:
process = psutil.Process(pid)
if hasattr(process, 'children'):
children = process.children(recursive=True)
except psutil.NoSuchProcess:
log.info('No process with the PID %s was found running', pid)
if process and only_children is False:
try:
cmdline = process.cmdline()
except psutil.AccessDenied:
# OSX denies us access to the above information
cmdline = None
if not cmdline:
try:
cmdline = process.as_dict()
except psutil.NoSuchProcess as exc:
log.debug('No such process found. Stacktrace: {0}'.format(exc))
if psutil.pid_exists(pid):
log.info('Terminating process: %s', cmdline)
process.terminate()
try:
process.wait(timeout=10)
except psutil.TimeoutExpired:
pass
if psutil.pid_exists(pid):
log.warning('Killing process: %s', cmdline)
process.kill()
if psutil.pid_exists(pid):
log.warning('Process left behind which we were unable to kill: %s', cmdline)
if children:
# Let's log and kill any child processes which salt left behind
def kill_children(_children, kill=False):
for child in _children[:][::-1]: # Iterate over a reversed copy of the list
try:
if not kill and child.status() == psutil.STATUS_ZOMBIE:
# Zombie processes will exit once child processes also exit
continue
try:
cmdline = child.cmdline()
except psutil.AccessDenied as err:
log.debug('Cannot obtain child process cmdline: %s', err)
cmdline = ''
if not cmdline:
cmdline = child.as_dict()
if kill:
log.warning('Killing child process left behind: %s', cmdline)
child.kill()
else:
log.warning('Terminating child process left behind: %s', cmdline)
child.terminate()
if not psutil.pid_exists(child.pid):
_children.remove(child)
except psutil.NoSuchProcess:
_children.remove(child)
try:
kill_children([child for child in children if child.is_running()
and not any(sys.argv[0] in cmd for cmd in child.cmdline())])
except psutil.AccessDenied:
# OSX denies us access to the above information
kill_children(children)
if children:
psutil.wait_procs(children, timeout=3, callback=lambda proc: kill_children(children, kill=True))
if children:
psutil.wait_procs(children, timeout=1, callback=lambda proc: kill_children(children, kill=True))
SYS_TMP_DIR = os.path.realpath(
# Avoid ${TMPDIR} and gettempdir() on MacOS as they yield a base path too long
# for unix sockets: ``error: AF_UNIX path too long``
# Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
os.environ.get('TMPDIR', tempfile.gettempdir()) if not salt.utils.is_darwin() else '/tmp'
)
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
PYEXEC = 'python{0}.{1}'.format(*sys.version_info)
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
TMP_PRODENV_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-prodenv-state-tree')
TMP_CONF_DIR = os.path.join(TMP, 'config')
TMP_SUB_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'sub-minion')
TMP_SYNDIC_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-minion')
TMP_SYNDIC_MASTER_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-master')
CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
PILLAR_DIR = os.path.join(FILES, 'pillar')
TMP_SCRIPT_DIR = os.path.join(TMP, 'scripts')
ENGINES_DIR = os.path.join(FILES, 'engines')
LOG_HANDLERS_DIR = os.path.join(FILES, 'log_handlers')
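# Templates consumed by ScriptPathMixin.get_script_path() below; they are
# rendered into thin wrapper scripts under TMP_SCRIPT_DIR that invoke the salt
# CLI entry points from the current code checkout.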
SCRIPT_TEMPLATES = {
'salt': [
'from salt.scripts import salt_main\n',
'if __name__ == \'__main__\':\n'
' salt_main()'
],
'salt-api': [
'import salt.cli\n',
'def main():\n',
' sapi = salt.cli.SaltAPI()',
' sapi.start()\n',
'if __name__ == \'__main__\':',
' main()'
],
'common': [
'from salt.scripts import salt_{0}\n',
'from salt.utils import is_windows\n\n',
'if __name__ == \'__main__\':\n',
' if is_windows():\n',
' import os.path\n',
' import py_compile\n',
' cfile = os.path.splitext(__file__)[0] + ".pyc"\n',
' if not os.path.exists(cfile):\n',
' py_compile.compile(__file__, cfile)\n',
' salt_{0}()'
]
}
RUNTIME_CONFIGS = {}
log = logging.getLogger(__name__)
def cleanup_runtime_config_instance(to_cleanup):
# Explicit and forced cleanup
for key in list(to_cleanup.keys()):
instance = to_cleanup.pop(key)
del instance
atexit.register(cleanup_runtime_config_instance, RUNTIME_CONFIGS)
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
'''
Return a random unused port on localhost
'''
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
usock.bind(('127.0.0.1', 0))
port = usock.getsockname()[1]
if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
# These ports are hardcoded in the test configuration
port = get_unused_localhost_port()
usock.close()
return port
DARWIN = True if sys.platform.startswith('darwin') else False
BSD = True if 'bsd' in sys.platform else False
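# Track the ports already handed out: on Darwin the same ephemeral port can be
# returned again while a previous socket is still registered. On Darwin/BSD the
# probe socket is closed before returning; elsewhere it is held open and
# released by the atexit handler registered below.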
if DARWIN and port in _RUNTESTS_PORTS:
port = get_unused_localhost_port()
usock.close()
return port
_RUNTESTS_PORTS[port] = usock
if DARWIN or BSD:
usock.close()
return port
def close_open_sockets(sockets_dict):
for port in list(sockets_dict):
sock = sockets_dict.pop(port)
sock.close()
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
SALT_LOG_PORT = get_unused_localhost_port()
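# Typical usage at the bottom of an integration test module (``MyModuleTest``
# being a hypothetical test case name):
#
#     if __name__ == '__main__':
#         from integration import run_tests
#         run_tests(MyModuleTest)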
def run_tests(*test_cases, **kwargs):
'''
Run integration tests for the chosen test cases.
This function uses optparse to set up the test environment.
'''
needs_daemon = kwargs.pop('needs_daemon', True)
if kwargs:
raise RuntimeError(
'The \'run_tests\' function only accepts \'needs_daemon\' as a '
'keyword argument'
)
class TestcaseParser(SaltTestcaseParser):
def setup_additional_options(self):
self.add_option(
'--sysinfo',
default=False,
action='store_true',
help='Print some system information.'
)
self.output_options_group.add_option(
'--no-colors',
'--no-colours',
default=False,
action='store_true',
help='Disable colour printing.'
)
if needs_daemon:
self.add_option(
'--transport',
default='zeromq',
choices=('zeromq', 'raet', 'tcp'),
help=('Select which transport to run the integration tests with, '
'zeromq, raet, or tcp. Default: %default')
)
def validate_options(self):
SaltTestcaseParser.validate_options(self)
# Transplant configuration
transport = None
if needs_daemon:
transport = self.options.transport
TestDaemon.transplant_configs(transport=transport)
def run_testcase(self, testcase, needs_daemon=True): # pylint: disable=W0221
if needs_daemon:
print(' * Setting up Salt daemons to execute tests')
with TestDaemon(self):
return SaltTestcaseParser.run_testcase(self, testcase)
return SaltTestcaseParser.run_testcase(self, testcase)
parser = TestcaseParser()
parser.parse_args()
for case in test_cases:
if parser.run_testcase(case, needs_daemon=needs_daemon) is False:
parser.finalize(1)
parser.finalize(0)
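# The classes below implement the test suite's TCP log server: the daemons
# started for the tests forward their log records to ``runtests_log_port`` as
# msgpack payloads, and the handler replays them through the local logging
# machinery so they end up in the runtests output.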
class ThreadingMixIn(socketserver.ThreadingMixIn):
daemon_threads = True
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
def server_activate(self):
self.shutting_down = threading.Event()
socketserver.TCPServer.server_activate(self)
#super(ThreadedSocketServer, self).server_activate()
def server_close(self):
if hasattr(self, 'shutting_down'):
self.shutting_down.set()
socketserver.TCPServer.server_close(self)
#super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
unpacker = msgpack.Unpacker(encoding='utf-8')
while not self.server.shutting_down.is_set():
try:
wire_bytes = self.request.recv(1024)
if not wire_bytes:
break
unpacker.feed(wire_bytes)
for record_dict in unpacker:
record = logging.makeLogRecord(record_dict)
logger = logging.getLogger(record.name)
logger.handle(record)
except (EOFError, KeyboardInterrupt, SystemExit):
break
except socket.error as exc:
try:
if exc.errno == errno.WSAECONNRESET:
# Connection reset on windows
break
except AttributeError:
# We're not on windows
pass
log.exception(exc)
except Exception as exc:
log.exception(exc)
class ScriptPathMixin(object):
def get_script_path(self, script_name):
'''
Return the path to a testing runtime script
'''
if not os.path.isdir(TMP_SCRIPT_DIR):
os.makedirs(TMP_SCRIPT_DIR)
script_path = os.path.join(TMP_SCRIPT_DIR,
'cli_{0}.py'.format(script_name.replace('-', '_')))
if not os.path.isfile(script_path):
log.info('Generating {0}'.format(script_path))
# Late import
import salt.utils
with salt.utils.fopen(script_path, 'w') as sfh:
script_template = SCRIPT_TEMPLATES.get(script_name, None)
if script_template is None:
script_template = SCRIPT_TEMPLATES.get('common', None)
if script_template is None:
raise RuntimeError(
'{0} does not know how to handle the {1} script'.format(
self.__class__.__name__,
script_name
)
)
sfh.write(
'#!{0}\n\n'.format(sys.executable) +
'import sys\n' +
'CODE_DIR="{0}"\n'.format(CODE_DIR) +
'if CODE_DIR not in sys.path:\n' +
' sys.path.insert(0, CODE_DIR)\n\n' +
'\n'.join(script_template).format(script_name.replace('salt-', ''))
)
fst = os.stat(script_path)
os.chmod(script_path, fst.st_mode | stat.S_IEXEC)
log.info('Returning script path %r', script_path)
return script_path
class SaltScriptBase(ScriptPathMixin):
'''
Base class for Salt CLI scripts
'''
cli_script_name = None
def __init__(self,
config,
config_dir,
bin_dir_path,
io_loop=None):
self.config = config
self.config_dir = config_dir
self.bin_dir_path = bin_dir_path
self._io_loop = io_loop
@property
def io_loop(self):
'''
Return an IOLoop
'''
if self._io_loop is None:
self._io_loop = ioloop.IOLoop.current()
return self._io_loop
def get_script_args(self): # pylint: disable=no-self-use
'''
Returns any additional arguments to pass to the CLI script
'''
return []
class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
'''
Base class for Salt Daemon CLI scripts
'''
def __init__(self, *args, **kwargs):
super(SaltDaemonScriptBase, self).__init__(*args, **kwargs)
self._running = multiprocessing.Event()
self._connectable = multiprocessing.Event()
self._process = None
def is_alive(self):
'''
Returns true if the process is alive
'''
return self._running.is_set()
def get_check_ports(self): # pylint: disable=no-self-use
'''
Return a list of ports to check against to ensure the daemon is running
'''
return []
def start(self):
'''
Start the daemon subprocess
'''
self._process = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=self._start, args=(self._running,))
self._process.start()
self._running.set()
return True
def _start(self, running_event):
'''
The actual, coroutine aware, start method
'''
log.info('Starting %s %s DAEMON', self.display_name, self.__class__.__name__)
proc_args = [
self.get_script_path(self.cli_script_name),
'-c',
self.config_dir,
] + self.get_script_args()
if salt.utils.is_windows():
# Windows needs the python executable to come first
proc_args.insert(0, sys.executable)
log.info('Running \'%s\' from %s...', ' '.join(proc_args), self.__class__.__name__)
try:
terminal = NonBlockingPopen(proc_args, cwd=CODE_DIR)
while running_event.is_set() and terminal.poll() is None:
# We're not actually interested in processing the output, just consume it
if terminal.stdout is not None:
terminal.recv()
if terminal.stderr is not None:
terminal.recv_err()
time.sleep(0.125)
except (SystemExit, KeyboardInterrupt):
pass
terminate_process_pid(terminal.pid)
# terminal.communicate()
def terminate(self):
'''
Terminate the started daemon
'''
log.info('Terminating %s %s DAEMON', self.display_name, self.__class__.__name__)
self._running.clear()
self._connectable.clear()
time.sleep(0.0125)
terminate_process_pid(self._process.pid)
# self._process.join()
log.info('%s %s DAEMON terminated', self.display_name, self.__class__.__name__)
def wait_until_running(self, timeout=None):
'''
Blocking call to wait for the daemon to start listening
'''
if self._connectable.is_set():
return True
try:
return self.io_loop.run_sync(self._wait_until_running, timeout=timeout)
except ioloop.TimeoutError:
return False
@gen.coroutine
def _wait_until_running(self):
'''
The actual, coroutine aware, call to wait for the daemon to start listening
'''
check_ports = self.get_check_ports()
log.debug(
'%s is checking the following ports to assure running status: %s',
self.__class__.__name__,
check_ports
)
while self._running.is_set():
if not check_ports:
self._connectable.set()
break
for port in set(check_ports):
if isinstance(port, int):
log.trace('Checking connectable status on port: %s', port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn = sock.connect_ex(('localhost', port))
if conn == 0:
log.debug('Port %s is connectable!', port)
check_ports.remove(port)
try:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
except socket.error as exc:
if not sys.platform.startswith('darwin'):
raise
try:
if exc.errno != errno.ENOTCONN:
raise
except AttributeError:
# This is not OSX !?
pass
del sock
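# Entries which are not integers are treated as minion ids; the minion is
# considered up once it shows in the output of ``salt-run manage.joined``.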
elif isinstance(port, str):
joined = self.run_run('manage.joined', config_dir=self.config_dir)
joined = [x.lstrip('- ') for x in joined]
if port in joined:
check_ports.remove(port)
yield gen.sleep(0.125)
# A final sleep to allow the ioloop to do other things
yield gen.sleep(0.125)
log.info('All ports checked. %s running!', self.cli_script_name)
raise gen.Return(self._connectable.is_set())
class SaltMinion(SaltDaemonScriptBase):
'''
Class which runs the salt-minion daemon
'''
cli_script_name = 'salt-minion'
def get_script_args(self):
script_args = ['-l', 'quiet']
if salt.utils.is_windows() is False:
script_args.append('--disable-keepalive')
return script_args
def get_check_ports(self):
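# On Windows the minion's TCP pub/pull ports can be probed directly; on other
# platforms the minion id is returned instead and resolved through
# ``manage.joined`` in SaltDaemonScriptBase._wait_until_running().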
if salt.utils.is_windows():
return set([self.config['tcp_pub_port'],
self.config['tcp_pull_port']])
else:
return set([self.config['id']])
class SaltMaster(SaltDaemonScriptBase):
'''
Class which runs the salt-master daemon
'''
cli_script_name = 'salt-master'
def get_check_ports(self):
#return set([self.config['runtests_conn_check_port']])
return set([self.config['ret_port'],
self.config['publish_port']])
# Disabled along with Pytest config until fixed.
# self.config['runtests_conn_check_port']])
def get_script_args(self):
#return ['-l', 'debug']
return ['-l', 'quiet']
class SaltSyndic(SaltDaemonScriptBase):
'''
Class which runs the salt-syndic daemon
'''
cli_script_name = 'salt-syndic'
def get_script_args(self):
#return ['-l', 'debug']
return ['-l', 'quiet']
def get_check_ports(self):
return set()
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
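# Minimal usage sketch (``parser`` being an already parsed SaltTestcaseParser;
# see run_tests() above):
#
#     with TestDaemon(parser):
#         # master, minions, syndic and the log server are running here
#         ...
#
# __exit__ below tears everything down again.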
def __init__(self, parser):
self.parser = parser
self.colors = salt.utils.get_colors(self.parser.options.no_colors is False)
if salt.utils.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ''
def __enter__(self):
'''
Start a master and minion
'''
# Setup the multiprocessing logging queue listener
salt_log_setup.setup_multiprocessing_logging_listener(
self.master_opts
)
# Set up PATH to mockbin
self._enter_mockbin()
if self.parser.options.transport == 'zeromq':
self.start_zeromq_daemons()
elif self.parser.options.transport == 'raet':
self.start_raet_daemons()
elif self.parser.options.transport == 'tcp':
self.start_tcp_daemons()
self.minion_targets = set(['minion', 'sub_minion'])
self.pre_setup_minions()
self.setup_minions()
if getattr(self.parser.options, 'ssh', False):
self.prep_ssh()
if self.parser.options.sysinfo:
try:
print_header(
'~~~~~~~ Versions Report ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Versions Report ', inline=True)
print('\n'.join(salt.version.versions_report()))
try:
print_header(
'~~~~~~~ Minion Grains Information ', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('~~~~~~~ Minion Grains Information ', inline=True)
grains = self.client.cmd('minion', 'grains.items')
minion_opts = self.minion_opts.copy()
minion_opts['color'] = self.parser.options.no_colors is False
salt.output.display_output(grains, 'grains', minion_opts)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('', sep='=', inline=True)
try:
return self
finally:
self.post_setup_minions()
def start_daemon(self, cls, opts, start_fun):
def start(cls, opts, start_fun):
salt.utils.appendproctitle('{0}-{1}'.format(self.__class__.__name__, cls.__name__))
daemon = cls(opts)
getattr(daemon, start_fun)()
process = multiprocessing.Process(target=start,
args=(cls, opts, start_fun))
process.start()
return process
def start_zeromq_daemons(self):
'''
Fire up the daemons used for zeromq tests
'''
self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.daemon = True
self.log_server_process.start()
self.master_process = SaltMaster(self.master_opts, TMP_CONF_DIR, SCRIPT_DIR)
self.master_process.display_name = 'salt-master'
self.minion_process = SaltMinion(self.minion_opts, TMP_CONF_DIR, SCRIPT_DIR)
self.minion_process.display_name = 'salt-minion'
self.sub_minion_process = SaltMinion(self.sub_minion_opts, TMP_SUB_MINION_CONF_DIR, SCRIPT_DIR)
self.sub_minion_process.display_name = 'sub salt-minion'
self.smaster_process = SaltMaster(self.syndic_master_opts, TMP_SYNDIC_MASTER_CONF_DIR, SCRIPT_DIR)
self.smaster_process.display_name = 'syndic salt-master'
self.syndic_process = SaltSyndic(self.syndic_opts, TMP_SYNDIC_MINION_CONF_DIR, SCRIPT_DIR)
self.syndic_process.display_name = 'salt-syndic'
for process in (self.master_process, self.minion_process, self.sub_minion_process,
self.smaster_process, self.syndic_process):
sys.stdout.write(
' * {LIGHT_YELLOW}Starting {0} ... {ENDC}'.format(
process.display_name,
**self.colors
)
)
sys.stdout.flush()
process.start()
process.wait_until_running(timeout=15)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting {0} ... STARTED!\n{ENDC}'.format(
process.display_name,
**self.colors
)
)
sys.stdout.flush()
def start_raet_daemons(self):
'''
Fire up the raet daemons!
'''
import salt.daemons.flo
self.master_process = self.start_daemon(salt.daemons.flo.IofloMaster,
self.master_opts,
'start')
self.minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
self.minion_opts,
'tune_in')
self.sub_minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
self.sub_minion_opts,
'tune_in')
# Wait for the daemons to all spin up
time.sleep(5)
# self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
# self.syndic_master_opts,
# 'start')
# no raet syndic daemon yet
start_tcp_daemons = start_zeromq_daemons
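# The TCP transport reuses the exact same script-driven daemons as ZeroMQ; only
# the ``transport`` option differs, and that is set in transplant_configs().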
def prep_ssh(self):
'''
Generate keys and start an ssh daemon on an alternate port
'''
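# Tests decorated with @requires_sshd_server (imported from salttesting above)
# key off the SSH_DAEMON_RUNNING environment variable which is set at the end
# of this method once sshd comes up cleanly.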
sys.stdout.write(
' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
'SSH server',
**self.colors
)
)
keygen = salt.utils.which('ssh-keygen')
sshd = salt.utils.which('sshd')
if not (keygen and sshd):
print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
return
if not os.path.exists(TMP_CONF_DIR):
os.makedirs(TMP_CONF_DIR)
# Generate client key
pub_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
priv_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test')
if os.path.exists(pub_key_test_file):
os.remove(pub_key_test_file)
if os.path.exists(priv_key_test_file):
os.remove(priv_key_test_file)
keygen_process = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'key_test',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=TMP_CONF_DIR
)
_, keygen_err = keygen_process.communicate()
if keygen_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_err)))
sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
shutil.copy(sshd_config_path, TMP_CONF_DIR)
auth_key_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
# Generate server key
server_key_dir = os.path.join(TMP_CONF_DIR, 'server')
if not os.path.exists(server_key_dir):
os.makedirs(server_key_dir)
server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')
for server_key_file in (server_dsa_priv_key_file,
server_dsa_pub_key_file,
server_ecdsa_priv_key_file,
server_ecdsa_pub_key_file,
server_ed25519_priv_key_file,
server_ed25519_pub_key_file):
if os.path.exists(server_key_file):
os.remove(server_key_file)
keygen_process_dsa = subprocess.Popen(
[keygen, '-t',
'dsa',
'-b',
'1024',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_dsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_dsa_err = keygen_process_dsa.communicate()
if keygen_dsa_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_dsa_err)))
keygen_process_ecdsa = subprocess.Popen(
[keygen, '-t',
'ecdsa',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ecdsa_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_escda_err = keygen_process_ecdsa.communicate()
if keygen_escda_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_escda_err)))
keygen_process_ed25519 = subprocess.Popen(
[keygen, '-t',
'ed25519',
'-b',
'521',
'-C',
'"$(whoami)@$(hostname)-$(date -I)"',
'-f',
'ssh_host_ed25519_key',
'-P',
''],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir
)
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
if keygen_ed25519_err:
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_ed25519_err)))
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
if not keygen_dsa_err:
ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
if not keygen_escda_err:
ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
if not keygen_ed25519_err:
ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))
self.sshd_pidfile = os.path.join(TMP_CONF_DIR, 'sshd.pid')
self.sshd_process = subprocess.Popen(
[sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=TMP_CONF_DIR
)
_, sshd_err = self.sshd_process.communicate()
if sshd_err:
2015-06-06 13:47:52 +00:00
print('sshd had errors on startup: {0}'.format(salt.utils.to_str(sshd_err)))
else:
os.environ['SSH_DAEMON_RUNNING'] = 'True'
roster_path = os.path.join(FILES, 'conf/_ssh/roster')
shutil.copy(roster_path, TMP_CONF_DIR)
if salt.utils.is_windows():
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
roster.write(' user: {0}\n'.format(win32api.GetUserName()))
roster.write(' priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
else:
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
roster.write(' user: {0}\n'.format(pwd.getpwuid(os.getuid()).pw_name))
roster.write(' priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
sys.stdout.write(
' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
**self.colors
)
)
@classmethod
def config(cls, role):
'''
Return a configuration for a master/minion/syndic.
Currently these roles are:
* master
* minion
* syndic
* syndic_master
* sub_minion
'''
return RUNTIME_CONFIGS[role]
@classmethod
def config_location(cls):
return TMP_CONF_DIR
@property
def client(self):
'''
Return a local client which will be used for example to ping and sync
the test minions.
This client is defined as a property because its creation needs to be
deferred to a later stage. If it was created on `__enter__`, as it
previously was, it would not receive the master events.
'''
if 'runtime_client' not in RUNTIME_CONFIGS:
RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
mopts=self.master_opts
)
return RUNTIME_CONFIGS['runtime_client']
@classmethod
def transplant_configs(cls, transport='zeromq'):
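# Build a disposable configuration tree under TMP_CONF_DIR: the static files
# from tests/integration/files/conf are copied over and the master, minion and
# syndic configs are re-rendered with runtime values (paths, ports, the user
# running the tests) before any daemon is started.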
if os.path.isdir(TMP_CONF_DIR):
shutil.rmtree(TMP_CONF_DIR)
os.makedirs(TMP_CONF_DIR)
os.makedirs(TMP_SUB_MINION_CONF_DIR)
os.makedirs(TMP_SYNDIC_MASTER_CONF_DIR)
os.makedirs(TMP_SYNDIC_MINION_CONF_DIR)
print(' * Transplanting configuration files to \'{0}\''.format(TMP_CONF_DIR))
if salt.utils.is_windows():
running_tests_user = win32api.GetUserName()
else:
running_tests_user = pwd.getpwuid(os.getuid()).pw_name
tests_known_hosts_file = os.path.join(TMP_CONF_DIR, 'salt_ssh_known_hosts')
with salt.utils.fopen(tests_known_hosts_file, 'w') as known_hosts:
known_hosts.write('')
# This master connects to syndic_master via a syndic
master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
master_opts['known_hosts_file'] = tests_known_hosts_file
master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
master_opts['user'] = running_tests_user
master_opts['config_dir'] = TMP_CONF_DIR
master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(master_opts)
# Let's update with the syndic configuration
syndic_opts.update(salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic')))
syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
syndic_opts['config_dir'] = TMP_SYNDIC_MINION_CONF_DIR
# This minion connects to master
minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'minion'))
minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
minion_opts['user'] = running_tests_user
minion_opts['config_dir'] = TMP_CONF_DIR
minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
# This sub_minion also connects to master
sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
sub_minion_opts['user'] = running_tests_user
sub_minion_opts['config_dir'] = TMP_SUB_MINION_CONF_DIR
sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
# This is the master of masters
syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
syndic_master_opts['user'] = running_tests_user
syndic_master_opts['config_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')
if transport == 'raet':
master_opts['transport'] = 'raet'
master_opts['raet_port'] = 64506
minion_opts['transport'] = 'raet'
minion_opts['raet_port'] = 64510
sub_minion_opts['transport'] = 'raet'
sub_minion_opts['raet_port'] = 64520
# syndic_master_opts['transport'] = 'raet'
if transport == 'tcp':
master_opts['transport'] = 'tcp'
minion_opts['transport'] = 'tcp'
sub_minion_opts['transport'] = 'tcp'
syndic_master_opts['transport'] = 'tcp'
# Set up config options that require internal data
master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
'base': [os.path.join(FILES, 'pillar', 'base')]
}
master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
TMP_STATE_TREE
],
# Alternate root to test __env__ choices
'prod': [
os.path.join(FILES, 'file', 'prod'),
TMP_PRODENV_STATE_TREE
]
}
for opts_dict in (master_opts, syndic_master_opts):
if 'ext_pillar' not in opts_dict:
opts_dict['ext_pillar'] = []
if salt.utils.is_windows():
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
else:
opts_dict['ext_pillar'].append(
{'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
for opts_dict in (master_opts, syndic_master_opts):
# We need to copy the extension modules into the new master root_dir,
# otherwise the configured path ends up being prefixed by root_dir and
# pointing at files which do not exist there
new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'extension_modules'
),
new_extension_modules_path
)
opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
# Point the config values to the correct temporary paths
for name in ('hosts', 'aliases'):
optname = '{0}.file'.format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
syndic_opts[optname] = optname_path
syndic_master_opts[optname] = optname_path
for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts):
if 'log_handlers_dirs' not in conf:
conf['log_handlers_dirs'] = []
conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
conf['runtests_log_port'] = SALT_LOG_PORT
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
for entry in os.listdir(CONF_DIR):
if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
# These have runtime computed values and will be handled
# differently
continue
entry_path = os.path.join(CONF_DIR, entry)
if os.path.isfile(entry_path):
shutil.copy(
entry_path,
os.path.join(TMP_CONF_DIR, entry)
)
elif os.path.isdir(entry_path):
shutil.copytree(
entry_path,
os.path.join(TMP_CONF_DIR, entry)
)
for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, entry), 'w') as fp_:
fp_.write(yaml.dump(computed_config, default_flow_style=False))
sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
with salt.utils.fopen(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as fp_:
    fp_.write(yaml.dump(sub_minion_computed_config, default_flow_style=False))
shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SUB_MINION_CONF_DIR, 'master'))
syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
with salt.utils.fopen(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as fp_:
    fp_.write(yaml.dump(syndic_master_computed_config, default_flow_style=False))
syndic_computed_config = copy.deepcopy(syndic_opts)
with salt.utils.fopen(os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as fp_:
    fp_.write(yaml.dump(syndic_computed_config, default_flow_style=False))
shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'))
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(os.path.join(TMP_CONF_DIR, 'master'))
minion_opts = salt.config.minion_config(os.path.join(TMP_CONF_DIR, 'minion'))
syndic_opts = salt.config.syndic_config(
os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'),
os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
)
sub_minion_opts = salt.config.minion_config(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'))
syndic_master_opts = salt.config.master_config(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
RUNTIME_CONFIGS['master'] = freeze(master_opts)
RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
os.path.join(master_opts['pki_dir'], 'minions_pre'),
os.path.join(master_opts['pki_dir'], 'minions_rejected'),
os.path.join(master_opts['pki_dir'], 'minions_denied'),
os.path.join(master_opts['cachedir'], 'jobs'),
os.path.join(master_opts['cachedir'], 'raet'),
os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(syndic_master_opts['pki_dir'], 'minions'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
os.path.join(syndic_master_opts['cachedir'], 'jobs'),
os.path.join(syndic_master_opts['cachedir'], 'raet'),
os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
os.path.join(master_opts['pki_dir'], 'accepted'),
os.path.join(master_opts['pki_dir'], 'rejected'),
os.path.join(master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
os.path.join(syndic_master_opts['pki_dir'], 'pending'),
os.path.join(syndic_master_opts['cachedir'], 'raet'),
os.path.join(minion_opts['pki_dir'], 'accepted'),
os.path.join(minion_opts['pki_dir'], 'rejected'),
os.path.join(minion_opts['pki_dir'], 'pending'),
os.path.join(minion_opts['cachedir'], 'raet'),
os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
os.path.join(sub_minion_opts['pki_dir'], 'pending'),
os.path.join(sub_minion_opts['cachedir'], 'raet'),
os.path.dirname(master_opts['log_file']),
minion_opts['extension_modules'],
sub_minion_opts['extension_modules'],
sub_minion_opts['pki_dir'],
master_opts['sock_dir'],
syndic_master_opts['sock_dir'],
sub_minion_opts['sock_dir'],
minion_opts['sock_dir'],
TMP_STATE_TREE,
TMP_PRODENV_STATE_TREE,
TMP,
],
running_tests_user)
cls.master_opts = master_opts
cls.minion_opts = minion_opts
cls.sub_minion_opts = sub_minion_opts
cls.syndic_opts = syndic_opts
cls.syndic_master_opts = syndic_master_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
'''
Kill the minion and master processes
'''
self.sub_minion_process.terminate()
self.minion_process.terminate()
self.master_process.terminate()
try:
self.syndic_process.terminate()
except AttributeError:
pass
try:
self.smaster_process.terminate()
except AttributeError:
pass
#salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50)
#self.sub_minion_process.join()
#salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50)
#self.minion_process.join()
#salt.utils.process.clean_proc(self.master_process, wait_for_kill=50)
#self.master_process.join()
#try:
# salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50)
# self.syndic_process.join()
#except AttributeError:
# pass
#try:
# salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50)
# self.smaster_process.join()
#except AttributeError:
# pass
self.log_server.server_close()
self.log_server.shutdown()
self._exit_mockbin()
self._exit_ssh()
self.log_server_process.join()
# Shutdown the multiprocessing logging queue listener
salt_log_setup.shutdown_multiprocessing_logging()
salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
def pre_setup_minions(self):
'''
Subclass this method for additional minion setups.
'''
def setup_minions(self):
'''
Minion setup routines
'''
def post_setup_minions(self):
'''
Subclass this method to execute code after the minions have been set up
'''
def _enter_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ['PATH'] = os.pathsep.join(path_items)
def _exit_ssh(self):
if hasattr(self, 'sshd_process'):
try:
self.sshd_process.kill()
except OSError as exc:
if exc.errno != 3:  # errno 3 == ESRCH: process already gone
raise
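# Also kill the sshd recorded in the pidfile, in case it differs
# from the Popen handle above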
with salt.utils.fopen(self.sshd_pidfile) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError as exc:
if exc.errno != 3:  # errno 3 == ESRCH: process already gone
raise
def _exit_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ['PATH'] = os.pathsep.join(path_items)
@classmethod
def clean(cls):
'''
Clean out the tmp files
'''
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
for dirname in (TMP, TMP_STATE_TREE, TMP_PRODENV_STATE_TREE):
if os.path.isdir(dirname):
shutil.rmtree(dirname, onerror=remove_readonly)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
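# Require two consecutive polls with no matching job before declaring the
# jid finished, so a single snapshot cannot produce a false positive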
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
if not running and job_finished is False:
# Let's not have false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
sys.stdout.write(
'\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
'back\n'.format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(
list(targets), 'saltutil.running', tgt_type='list'
)
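# Only report minions whose currently running job matches the jid we
# are waiting on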
return [
k for (k, v) in six.iteritems(running) if v and v[0]['jid'] == jid
]
def wait_for_minion_connections(self, targets, timeout):
salt.utils.appendproctitle('WaitForMinionConnections')
sys.stdout.write(
' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions ({1}) to '
'connect back\n'.format(
(timeout > 60 and
timedelta(seconds=timeout) or
'{0} secs'.format(timeout)),
', '.join(targets),
**self.colors
)
)
sys.stdout.flush()
expected_connections = set(targets)
now = datetime.now()
expire = now + timedelta(seconds=timeout)
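# Poll the expected minions with test.ping until every one of them has
# answered or the timeout expires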
while now <= expire:
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(expected_connections),
**self.colors
)
)
sys.stdout.flush()
try:
responses = self.client.cmd(
list(expected_connections), 'test.ping', tgt_type='list',
)
# we'll get this exception if the master process hasn't finished starting yet
except SaltClientError:
time.sleep(0.1)
now = datetime.now()
continue
for target in responses:
if target not in expected_connections:
# An unexpected minion answered; ignore it
continue
expected_connections.remove(target)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns',
PNUM)
)
)
sys.stdout.write(
' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
target, **self.colors
)
)
sys.stdout.flush()
if not expected_connections:
return
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
print(
'\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect '
'back. Tests requiring them WILL fail'.format(**self.colors)
)
try:
print_header(
'=', sep='=', inline=True,
width=getattr(self.parser.options, 'output_columns', PNUM)
)
except TypeError:
print_header('=', sep='=', inline=True)
raise SystemExit()
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
if not timeout:
timeout = 120
# Let's sync all connected minions
print(
' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
'(saltutil.sync_{1})'.format(
', '.join(targets),
modules_kind,
**self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets), 'saltutil.sync_{0}'.format(modules_kind),
tgt_type='list',
timeout=999999999999999,
)
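# Block until the sync job has finished everywhere; the actual returns
# are collected below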
if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
print(
' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
'Tests requiring these {0} WILL fail'.format(
modules_kind, **self.colors)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
if rdata:
for name, output in six.iteritems(rdata):
if not output['ret']:
# Already synced!?
syncing.remove(name)
continue
if isinstance(output['ret'], six.string_types):
# An error has occurred
print(
' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
'{1}'.format(
name, output['ret'],
modules_kind,
**self.colors)
)
return False
print(
' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
'{1}'.format(
name,
', '.join(output['ret']),
modules_kind, **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
' {LIGHT_RED}*{ENDC} {0} already synced??? '
'{1}'.format(name, output, **self.colors)
)
return True
def sync_minion_states(self, targets, timeout=None):
salt.utils.appendproctitle('SyncMinionStates')
self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
salt.utils.appendproctitle('SyncMinionModules')
self.sync_minion_modules_('modules', targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
self.sync_minion_modules_('grains', targets, timeout=timeout)
class AdaptedConfigurationTestCaseMixIn(object):
__slots__ = ()
def get_config(self, config_for, from_scratch=False):
if from_scratch:
if config_for in ('master', 'syndic_master'):
return salt.config.master_config(self.get_config_file_path(config_for))
elif config_for in ('minion', 'sub_minion'):
return salt.config.minion_config(self.get_config_file_path(config_for))
elif config_for in ('syndic',):
return salt.config.syndic_config(
self.get_config_file_path(config_for),
self.get_config_file_path('minion')
)
elif config_for == 'client_config':
return salt.config.client_config(self.get_config_file_path('master'))
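# Parsed configurations are cached in RUNTIME_CONFIGS so repeated lookups
# do not re-read and re-parse the files on disk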
if config_for not in RUNTIME_CONFIGS:
if config_for in ('master', 'syndic_master'):
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.master_config(self.get_config_file_path(config_for))
)
elif config_for in ('minion', 'sub_minion'):
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.minion_config(self.get_config_file_path(config_for))
)
elif config_for in ('syndic',):
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.syndic_config(
self.get_config_file_path(config_for),
self.get_config_file_path('minion')
)
)
elif config_for == 'client_config':
RUNTIME_CONFIGS[config_for] = freeze(
salt.config.client_config(self.get_config_file_path('master'))
)
return RUNTIME_CONFIGS[config_for]
def get_config_dir(self):
return TMP_CONF_DIR
def get_config_file_path(self, filename):
if filename == 'syndic_master':
return os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master')
if filename == 'syndic':
return os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion')
if filename == 'sub_minion':
return os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion')
return os.path.join(TMP_CONF_DIR, filename)
@property
def master_opts(self):
'''
Return the options used for the master
'''
return self.get_config('master')
class SaltMinionEventAssertsMixIn(object):
'''
Asserts to verify that a given event was seen
'''
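# A helper process started in __new__ listens on the minion event bus and
# pushes every event it sees into a multiprocessing queue, which the
# assertion helpers below consume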
def __new__(cls, *args, **kwargs):
# We have to cross-call to re-gen a config
cls.q = multiprocessing.Queue()
cls.fetch_proc = multiprocessing.Process(target=cls._fetch, args=(cls.q,))
cls.fetch_proc.start()
return object.__new__(cls)
def __exit__(self, *args, **kwargs):
self.fetch_proc.join()
@staticmethod
def _fetch(q):
'''
Collect events and store them
'''
def _clean_queue():
print('Cleaning queue!')
while not q.empty():
queue_item = q.get()
queue_item.task_done()
atexit.register(_clean_queue)
a_config = AdaptedConfigurationTestCaseMixIn()
event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
while True:
try:
events = event.get_event(full=False)
except Exception:
# This is broad but we'll see all kinds of issues right now
# if we drop the proc out from under the socket while we're reading
continue  # skip the q.put() below; 'events' is not set when get_event() fails
q.put(events)
def assertMinionEventFired(self, tag):
#TODO
raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')
def assertMinionEventReceived(self, desired_event):
queue_wait = 5 # 2.5s
while self.q.empty():
time.sleep(0.5) # Wait for events to be pushed into the queue
queue_wait -= 1
if queue_wait <= 0:
raise AssertionError('Queue wait timer expired')
while not self.q.empty(): # This is not thread-safe and may be inaccurate
event = self.q.get()
if isinstance(event, dict):
event.pop('_stamp')
if desired_event == event:
self.fetch_proc.terminate()
return True
self.fetch_proc.terminate()
raise AssertionError('Event {0} was not received by minion'.format(desired_event))
class SaltClientTestCaseMixIn(AdaptedConfigurationTestCaseMixIn):
_salt_client_config_file_name_ = 'master'
__slots__ = ()
@property
def client(self):
if 'runtime_client' not in RUNTIME_CONFIGS:
RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
mopts=self.get_config(self._salt_client_config_file_name_, from_scratch=True)
)
return RUNTIME_CONFIGS['runtime_client']
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a module function
'''
def runTest(self):
'''
TODO remove after salt-testing PR #74 is merged and deployed
'''
try:
super(ModuleCase, self).runTest()
except AttributeError:
log.error('ModuleCase runTest() could not execute. Requires at least v2016.8.3 of '
'salt-testing package')
def minion_run(self, _function, *args, **kw):
'''
Run a single salt function on the 'minion' target and condition
the return down to match the behavior of the raw function call
'''
return self.run_function(_function, args, **kw)
def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
**kwargs):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
# Functions that legitimately return None; a None return from anything
# else means we failed to get a reply from the minion
known_to_return_none = (
'file.chown', 'file.chgrp', 'ssh.recv_known_host'
)
orig = self.client.cmd(
minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
)
if minion_tgt not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion \'{0}\'. Command output: {1}'.format(
minion_tgt, orig
)
)
elif orig[minion_tgt] is None and function not in known_to_return_none:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
'the minion \'{1}\'. Command output: {2}'.format(
function, minion_tgt, orig
)
)
# Try to match stalled state functions
orig[minion_tgt] = self._check_state_return(
orig[minion_tgt]
)
return orig[minion_tgt]
def run_state(self, function, **kwargs):
'''
Run the state.single command and return the state return structure
'''
ret = self.run_function('state.single', [function], **kwargs)
return self._check_state_return(ret)
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return self.get_config('minion')
@property
def sub_minion_opts(self):
'''
Return the options used for the sub_minion
'''
return self.get_config('sub_minion')
def _check_state_return(self, ret):
if isinstance(ret, dict):
# This is the supposed return format for state calls
return ret
if isinstance(ret, list):
jids = []
# These are usually errors
for item in ret[:]:
if not isinstance(item, six.string_types):
# We don't know how to handle this
continue
match = STATE_FUNCTION_RUNNING_RE.match(item)
if not match:
# We don't know how to handle this
continue
jid = match.group('jid')
if jid in jids:
continue
jids.append(jid)
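# A previous state run is still holding the state lock; look up the
# offending job and kill it so the following tests are not blocked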
job_data = self.run_function(
'saltutil.find_job', [jid]
)
job_kill = self.run_function('saltutil.kill_job', [jid])
msg = (
'A running state.single was found causing a state lock. '
'Job details: \'{0}\' Killing Job Returned: \'{1}\''.format(
job_data, job_kill
)
)
ret.append('[TEST SUITE ENFORCED]{0}'
'[/TEST SUITE ENFORCED]'.format(msg))
return ret
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a syndic based execution test
'''
_salt_client_config_file_name_ = 'syndic_master'
def run_function(self, function, arg=()):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
orig = self.client.cmd('minion', function, arg, timeout=25)
if 'minion' not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion. Command output: {0}'.format(orig)
)
return orig['minion']
class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase, ScriptPathMixin):
'''
Execute a test for a shell command
'''
_code_dir_ = CODE_DIR
_script_dir_ = SCRIPT_DIR
_python_executable_ = PYEXEC
def chdir(self, dirname):
try:
os.chdir(dirname)
except OSError:
os.chdir(INTEGRATION_TEST_DIR)
def run_salt(self, arg_str, with_retcode=False, catch_stderr=False, timeout=30): # pylint: disable=W0221
'''
Execute salt
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False, timeout=25): # pylint: disable=W0221
'''
Execute salt-ssh
'''
arg_str = '-ldebug -W -c {0} -i --priv {1} --roster-file {2} --out=json localhost {3}'.format(self.get_config_dir(), os.path.join(TMP_CONF_DIR, 'key_test'), os.path.join(TMP_CONF_DIR, 'roster'), arg_str)
return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout, raw=True)
def run_run(self, arg_str, with_retcode=False, catch_stderr=False, async=False, timeout=60, config_dir=None):
'''
Execute salt-run
'''
arg_str = '-c {0}{async_flag} -t {timeout} {1}'.format(config_dir or self.get_config_dir(),
arg_str,
timeout=timeout,
async_flag=' --async' if async else '')
return self.run_script('salt-run', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=30)
def run_run_plus(self, fun, *arg, **kwargs):
'''
Execute the runner function and return the return data and output in a dict
'''
ret = {'fun': fun}
from_scratch = bool(kwargs.pop('__reload_config', False))
# Have to create an empty dict and then update it, as the result from
# self.get_config() is an ImmutableDict which cannot be updated.
opts = {}
opts.update(self.get_config('client_config', from_scratch=from_scratch))
opts_arg = list(arg)
if kwargs:
opts_arg.append({'__kwarg__': True})
opts_arg[-1].update(kwargs)
opts.update({'doc': False, 'fun': fun, 'arg': opts_arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret['return'] = runner.run()
try:
ret['jid'] = runner.jid
except AttributeError:
ret['jid'] = None
# Compile output
# TODO: Support outputters other than nested
opts['color'] = False
opts['output_file'] = cStringIO()
try:
salt.output.display_output(ret['return'], opts=opts)
ret['out'] = opts['output_file'].getvalue().splitlines()
finally:
opts['output_file'].close()
return ret
def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
'''
Execute salt-key
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script(
'salt-key',
arg_str,
catch_stderr=catch_stderr,
with_retcode=with_retcode,
timeout=30
)
def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
'''
Execute salt-cp
'''
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-cp', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=30)
def run_call(self, arg_str, with_retcode=False, catch_stderr=False):
'''
Execute salt-call.
'''
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-call', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=30)
def run_cloud(self, arg_str, catch_stderr=False, timeout=15):
'''
Execute salt-cloud
'''
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
return self.run_script('salt-cloud', arg_str, catch_stderr,
timeout=timeout)
class ShellCaseCommonTestsMixIn(CheckShellBinaryNameAndVersionMixIn):
_call_binary_expected_version_ = salt.version.__version__
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.utils import which
from salt.version import __version_info__, SaltStackVersion
git = which('git')
if not git:
self.skipTest('The git binary is not available')
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if process.returncode != 0:
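# Retry without --first-parent, which some older git releases do not
# support for describe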
process = subprocess.Popen(
[git, 'describe', '--tags', '--match', 'v[0-9]*'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: \'{0}\''.format(
salt.utils.to_str(err)
)
)
parsed_version = SaltStackVersion.parse(out)
if parsed_version.info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. This test '
'would fail. Parsed(\'{0}\') < Expected(\'{1}\')'.format(
parsed_version.info, __version_info__
)
)
elif parsed_version.info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version.string, out)
@requires_sshd_server
class SSHCase(ShellCase):
'''
Execute a command via salt-ssh
'''
def _arg_str(self, function, arg):
return '{0} {1}'.format(function, ' '.join(arg))
def run_function(self, function, arg=(), timeout=90, **kwargs):
'''
We use a 90s timeout here, which some slower systems do end up needing
'''
ret = self.run_ssh(self._arg_str(function, arg), timeout=timeout)
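# salt-ssh emits JSON keyed by host; fall back to the raw output if it
# cannot be parsed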
try:
return json.loads(ret)['localhost']
except Exception:
return ret
class SaltReturnAssertsMixIn(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not a dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
raise AssertionError(
'{0} is equal to {1}. Salt returned an empty dictionary.'.format(ret, {})
)
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, six.string_types):
# If it's a string, make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
keys = self.__return_valid_keys(keys)
okeys = keys[:]
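# Walk the requested key path inside each minion's portion of the return,
# failing with a descriptive message if any key along the path is missing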
for part in six.itervalues(ret):
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
return ret_item
def assertSaltTrueReturn(self, ret):
try:
self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
def assertSaltFalseReturn(self, ret):
try:
self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertNotInSaltComment(self, not_in_comment, ret):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltStateWarning(self, in_comment, ret):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'warnings')
)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'warnings')
)
def assertInSaltReturn(self, item_to_check, ret, keys):
return self.assertIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
return self.assertNotIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
return self.assertRegexpMatches(
self.__getWithinSaltReturn(ret, keys), pattern
)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertNotEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)