# -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''

# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import json
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
    import pwd
except ImportError:
    pass

STATE_FUNCTION_RUNNING_RE = re.compile(
    r'''The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID '''
    r'(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)'
)
INTEGRATION_TEST_DIR = os.path.dirname(
    os.path.normpath(os.path.abspath(__file__))
)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))

# Import Salt Testing libs
from salttesting import TestCase
from salttesting.case import ShellTestCase
from salttesting.mixins import CheckShellBinaryNameAndVersionMixIn
from salttesting.parser import PNUM, print_header, SaltTestcaseParser
from salttesting.helpers import requires_sshd_server
from salttesting.helpers import ensure_in_syspath, RedirectStdStreams

# Update sys.path
ensure_in_syspath(CODE_DIR)

# Import Salt libs
import salt
import salt.config
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils
import salt.utils.process
import salt.log.setup as salt_log_setup
from salt.ext import six
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.utils.nb_popen import NonBlockingPopen
from salt.exceptions import SaltClientError

try:
    from shlex import quote as _quote  # pylint: disable=E0611
except ImportError:
    from pipes import quote as _quote

try:
    import salt.master
except ImportError:
    # Not required for raet tests
    pass

# Import 3rd-party libs
import yaml
import msgpack
import salt.ext.six as six
from salt.ext.six.moves import cStringIO

try:
    import salt.ext.six.moves.socketserver as socketserver
except ImportError:
    import socketserver

if salt.utils.is_windows():
    import win32api

from tornado import gen
from tornado import ioloop

try:
    from salttesting.helpers import terminate_process_pid
except ImportError:
    # Once the latest salt-testing works against salt's develop branch
    # uncomment the following 2 lines and delete the function defined
    # in this except
    #print('Please upgrade your version of salt-testing')
    #sys.exit(1)

    import psutil

    def terminate_process_pid(pid, only_children=False):
        children = []
        process = None

        # Let's begin the shutdown routines
        try:
            process = psutil.Process(pid)
            if hasattr(process, 'children'):
                children = process.children(recursive=True)
        except psutil.NoSuchProcess:
            log.info('No process with the PID %s was found running', pid)

        if process and only_children is False:
            try:
                cmdline = process.cmdline()
            except psutil.AccessDenied:
                # OSX denies us access to the above information
                cmdline = None
            if not cmdline:
                try:
                    cmdline = process.as_dict()
                except psutil.NoSuchProcess as exc:
                    log.debug('No such process found. Stacktrace: {0}'.format(exc))

            if psutil.pid_exists(pid):
                log.info('Terminating process: %s', cmdline)
                process.terminate()
                try:
                    process.wait(timeout=10)
                except psutil.TimeoutExpired:
                    pass

            if psutil.pid_exists(pid):
                log.warning('Killing process: %s', cmdline)
                process.kill()

            if psutil.pid_exists(pid):
                log.warning('Process left behind which we were unable to kill: %s', cmdline)

        if children:
            # Lets log and kill any child processes which salt left behind
            def kill_children(_children, kill=False):
                for child in _children[:][::-1]:  # Iterate over a reversed copy of the list
                    try:
                        if not kill and child.status() == psutil.STATUS_ZOMBIE:
                            # Zombie processes will exit once child processes also exit
                            continue
                        try:
                            cmdline = child.cmdline()
                        except psutil.AccessDenied as err:
                            log.debug('Cannot obtain child process cmdline: %s', err)
                            cmdline = ''
                        if not cmdline:
                            cmdline = child.as_dict()
                        if kill:
                            log.warning('Killing child process left behind: %s', cmdline)
                            child.kill()
                        else:
                            log.warning('Terminating child process left behind: %s', cmdline)
                            child.terminate()
                        if not psutil.pid_exists(child.pid):
                            _children.remove(child)
                    except psutil.NoSuchProcess:
                        _children.remove(child)

            try:
                kill_children([child for child in children if child.is_running()
                               and not any(sys.argv[0] in cmd for cmd in child.cmdline())])
            except psutil.AccessDenied:
                # OSX denies us access to the above information
                kill_children(children)

            if children:
                psutil.wait_procs(children, timeout=3, callback=lambda proc: kill_children(children, kill=True))

            if children:
                psutil.wait_procs(children, timeout=1, callback=lambda proc: kill_children(children, kill=True))

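
# Illustrative note (ours, not part of the upstream helper): ``terminate_process_pid``
# is what the daemon wrappers below call when shutting a spawned CLI script down, e.g.
#
#     proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(300)'])
#     terminate_process_pid(proc.pid)                        # terminate, then kill
#     terminate_process_pid(proc.pid, only_children=True)    # only reap child processes
#
# It is safe to call on a PID that is already gone; the NoSuchProcess case is handled.
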
SYS_TMP_DIR = os.path.realpath(
    # Avoid ${TMPDIR} and gettempdir() on MacOS as they yield a base path too long
    # for unix sockets: ``error: AF_UNIX path too long``
    # Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
    os.environ.get('TMPDIR', tempfile.gettempdir()) if not salt.utils.is_darwin() else '/tmp'
)
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
PYEXEC = 'python{0}.{1}'.format(*sys.version_info)
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
TMP_PRODENV_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-prodenv-state-tree')
TMP_CONF_DIR = os.path.join(TMP, 'config')
TMP_SUB_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'sub-minion')
TMP_SYNDIC_MINION_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-minion')
TMP_SYNDIC_MASTER_CONF_DIR = os.path.join(TMP_CONF_DIR, 'syndic-master')
CONF_DIR = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
PILLAR_DIR = os.path.join(FILES, 'pillar')
TMP_SCRIPT_DIR = os.path.join(TMP, 'scripts')
ENGINES_DIR = os.path.join(FILES, 'engines')
LOG_HANDLERS_DIR = os.path.join(FILES, 'log_handlers')

SCRIPT_TEMPLATES = {
    'salt': [
        'from salt.scripts import salt_main\n',
        'if __name__ == \'__main__\':\n'
        '    salt_main()'
    ],
    'salt-api': [
        'import salt.cli\n',
        'def main():\n',
        '    sapi = salt.cli.SaltAPI()',
        '    sapi.start()\n',
        'if __name__ == \'__main__\':',
        '    main()'
    ],
    'common': [
        'from salt.scripts import salt_{0}\n',
        'from salt.utils import is_windows\n\n',
        'if __name__ == \'__main__\':\n',
        '    if is_windows():\n',
        '        import os.path\n',
        '        import py_compile\n',
        '        cfile = os.path.splitext(__file__)[0] + ".pyc"\n',
        '        if not os.path.exists(cfile):\n',
        '            py_compile.compile(__file__, cfile)\n',
        '    salt_{0}()'
    ]
}
RUNTIME_CONFIGS = {}

log = logging.getLogger(__name__)


def cleanup_runtime_config_instance(to_cleanup):
    # Explicit and forced cleanup
    for key in list(to_cleanup.keys()):
        instance = to_cleanup.pop(key)
        del instance


atexit.register(cleanup_runtime_config_instance, RUNTIME_CONFIGS)

_RUNTESTS_PORTS = {}

def get_unused_localhost_port():
    '''
    Return a random unused port on localhost
    '''
    usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    usock.bind(('127.0.0.1', 0))
    port = usock.getsockname()[1]
    if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
        # These ports are hardcoded in the test configuration
        port = get_unused_localhost_port()
        usock.close()
        return port

    DARWIN = True if sys.platform.startswith('darwin') else False
    BSD = True if 'bsd' in sys.platform else False

    if DARWIN and port in _RUNTESTS_PORTS:
        port = get_unused_localhost_port()
        usock.close()
        return port

    _RUNTESTS_PORTS[port] = usock

    if DARWIN or BSD:
        usock.close()

    return port

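
# Usage sketch (illustrative): the allocator above backs ``SALT_LOG_PORT`` and the other
# runtime ports handed to the test daemons, roughly:
#
#     SALT_LOG_PORT = get_unused_localhost_port()
#     log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT),
#                                       SocketServerRequestHandler)
#
# The reserving socket is kept open in ``_RUNTESTS_PORTS`` so the same port is not handed
# out twice during a run; ``close_open_sockets`` below releases them at exit, while on
# Darwin/BSD the allocating socket is closed immediately instead.
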
def close_open_sockets(sockets_dict):
    for port in list(sockets_dict):
        sock = sockets_dict.pop(port)
        sock.close()


atexit.register(close_open_sockets, _RUNTESTS_PORTS)

SALT_LOG_PORT = get_unused_localhost_port()

def run_tests(*test_cases, **kwargs):
    '''
    Run integration tests for the chosen test cases.

    Function uses optparse to set up the test environment.
    '''

    needs_daemon = kwargs.pop('needs_daemon', True)
    if kwargs:
        raise RuntimeError(
            'The \'run_tests\' function only accepts \'needs_daemon\' as a '
            'keyword argument'
        )

    class TestcaseParser(SaltTestcaseParser):
        def setup_additional_options(self):
            self.add_option(
                '--sysinfo',
                default=False,
                action='store_true',
                help='Print some system information.'
            )
            self.output_options_group.add_option(
                '--no-colors',
                '--no-colours',
                default=False,
                action='store_true',
                help='Disable colour printing.'
            )
            if needs_daemon:
                self.add_option(
                    '--transport',
                    default='zeromq',
                    choices=('zeromq', 'raet', 'tcp'),
                    help=('Select which transport to run the integration tests with, '
                          'zeromq, raet, or tcp. Default: %default')
                )

        def validate_options(self):
            SaltTestcaseParser.validate_options(self)
            # Transplant configuration
            transport = None
            if needs_daemon:
                transport = self.options.transport
            TestDaemon.transplant_configs(transport=transport)

        def run_testcase(self, testcase, needs_daemon=True):  # pylint: disable=W0221
            if needs_daemon:
                print(' * Setting up Salt daemons to execute tests')
                with TestDaemon(self):
                    return SaltTestcaseParser.run_testcase(self, testcase)
            return SaltTestcaseParser.run_testcase(self, testcase)

    parser = TestcaseParser()
    parser.parse_args()
    for case in test_cases:
        if parser.run_testcase(case, needs_daemon=needs_daemon) is False:
            parser.finalize(1)
    parser.finalize(0)

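
# Sketch of how ``run_tests`` is consumed by individual integration test modules (the
# class name here is a placeholder, not a real test module):
#
#     if __name__ == '__main__':
#         from integration import run_tests
#         run_tests(MyStateModuleTest, needs_daemon=True)
#
# ``needs_daemon=False`` skips spinning up the master/minion daemons for test cases that
# only exercise helpers and never talk to a running Salt setup.
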
class ThreadingMixIn(socketserver.ThreadingMixIn):
    daemon_threads = True


class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):

    allow_reuse_address = True

    def server_activate(self):
        self.shutting_down = threading.Event()
        socketserver.TCPServer.server_activate(self)
        #super(ThreadedSocketServer, self).server_activate()

    def server_close(self):
        if hasattr(self, 'shutting_down'):
            self.shutting_down.set()
        socketserver.TCPServer.server_close(self)
        #super(ThreadedSocketServer, self).server_close()


class SocketServerRequestHandler(socketserver.StreamRequestHandler):
    def handle(self):
        unpacker = msgpack.Unpacker(encoding='utf-8')
        while not self.server.shutting_down.is_set():
            try:
                wire_bytes = self.request.recv(1024)
                if not wire_bytes:
                    break
                unpacker.feed(wire_bytes)
                for record_dict in unpacker:
                    record = logging.makeLogRecord(record_dict)
                    logger = logging.getLogger(record.name)
                    logger.handle(record)
            except (EOFError, KeyboardInterrupt, SystemExit):
                break
            except socket.error as exc:
                try:
                    if exc.errno == errno.WSAECONNRESET:
                        # Connection reset on windows
                        break
                except AttributeError:
                    # We're not on windows
                    pass
                log.exception(exc)
            except Exception as exc:
                log.exception(exc)

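
# The classes above are the receiving side of the runtests log pipeline: handlers loaded
# from ``LOG_HANDLERS_DIR`` inside the spawned daemons connect to ``runtests_log_port``
# and stream msgpack-serialised ``LogRecord`` dictionaries, which ``handle()`` re-emits
# through this process' logging tree. A minimal sending side looks roughly like this
# (an illustrative sketch, not the actual handler shipped with the test suite):
#
#     payload = msgpack.packb({'name': 'tests.demo', 'msg': 'hello from a daemon',
#                              'args': None, 'exc_info': None,
#                              'levelno': logging.INFO, 'levelname': 'INFO'})
#     sock = socket.create_connection(('localhost', SALT_LOG_PORT))
#     sock.sendall(payload)
#     sock.close()
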
class ScriptPathMixin(object):

    def get_script_path(self, script_name):
        '''
        Return the path to a testing runtime script
        '''
        if not os.path.isdir(TMP_SCRIPT_DIR):
            os.makedirs(TMP_SCRIPT_DIR)

        script_path = os.path.join(TMP_SCRIPT_DIR,
                                   'cli_{0}.py'.format(script_name.replace('-', '_')))

        if not os.path.isfile(script_path):
            log.info('Generating {0}'.format(script_path))

            # Late import
            import salt.utils

            with salt.utils.fopen(script_path, 'w') as sfh:
                script_template = SCRIPT_TEMPLATES.get(script_name, None)
                if script_template is None:
                    script_template = SCRIPT_TEMPLATES.get('common', None)
                if script_template is None:
                    raise RuntimeError(
                        '{0} does not know how to handle the {1} script'.format(
                            self.__class__.__name__,
                            script_name
                        )
                    )
                sfh.write(
                    '#!{0}\n\n'.format(sys.executable) +
                    'import sys\n' +
                    'CODE_DIR="{0}"\n'.format(CODE_DIR) +
                    'if CODE_DIR not in sys.path:\n' +
                    '    sys.path.insert(0, CODE_DIR)\n\n' +
                    '\n'.join(script_template).format(script_name.replace('salt-', ''))
                )
            fst = os.stat(script_path)
            os.chmod(script_path, fst.st_mode | stat.S_IEXEC)

        log.info('Returning script path %r', script_path)
        return script_path

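
# For reference, the wrapper generated above for e.g. ``salt-minion`` (written to
# ``TMP_SCRIPT_DIR/cli_salt_minion.py``) renders roughly as follows; the shebang and
# CODE_DIR value depend on the interpreter and checkout running the suite:
#
#     #!/usr/bin/python
#
#     import sys
#     CODE_DIR="/path/to/salt"
#     if CODE_DIR not in sys.path:
#         sys.path.insert(0, CODE_DIR)
#
#     from salt.scripts import salt_minion
#     from salt.utils import is_windows
#
#     if __name__ == '__main__':
#         if is_windows():
#             ...  # pre-compile the wrapper to a .pyc (see the 'common' template)
#         salt_minion()
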
class SaltScriptBase(ScriptPathMixin):
    '''
    Base class for Salt CLI scripts
    '''

    cli_script_name = None

    def __init__(self,
                 config,
                 config_dir,
                 bin_dir_path,
                 io_loop=None):
        self.config = config
        self.config_dir = config_dir
        self.bin_dir_path = bin_dir_path
        self._io_loop = io_loop

    @property
    def io_loop(self):
        '''
        Return an IOLoop
        '''
        if self._io_loop is None:
            self._io_loop = ioloop.IOLoop.current()
        return self._io_loop

    def get_script_args(self):  # pylint: disable=no-self-use
        '''
        Returns any additional arguments to pass to the CLI script
        '''
        return []

class SaltDaemonScriptBase(SaltScriptBase, ShellTestCase):
    '''
    Base class for Salt Daemon CLI scripts
    '''

    def __init__(self, *args, **kwargs):
        super(SaltDaemonScriptBase, self).__init__(*args, **kwargs)
        self._running = multiprocessing.Event()
        self._connectable = multiprocessing.Event()
        self._process = None

    def is_alive(self):
        '''
        Returns true if the process is alive
        '''
        return self._running.is_set()

    def get_check_ports(self):  # pylint: disable=no-self-use
        '''
        Return a list of ports to check against to ensure the daemon is running
        '''
        return []

    def start(self):
        '''
        Start the daemon subprocess
        '''
        self._process = salt.utils.process.SignalHandlingMultiprocessingProcess(
            target=self._start, args=(self._running,))
        self._process.start()
        self._running.set()
        return True

    def _start(self, running_event):
        '''
        The actual, coroutine aware, start method
        '''
        log.info('Starting %s %s DAEMON', self.display_name, self.__class__.__name__)
        proc_args = [
            self.get_script_path(self.cli_script_name),
            '-c',
            self.config_dir,
        ] + self.get_script_args()
        if salt.utils.is_windows():
            # Windows needs the python executable to come first
            proc_args.insert(0, sys.executable)
        log.info('Running \'%s\' from %s...', ' '.join(proc_args), self.__class__.__name__)

        try:
            terminal = NonBlockingPopen(proc_args, cwd=CODE_DIR)

            while running_event.is_set() and terminal.poll() is None:
                # We're not actually interested in processing the output, just consume it
                if terminal.stdout is not None:
                    terminal.recv()
                if terminal.stderr is not None:
                    terminal.recv_err()
                time.sleep(0.125)
        except (SystemExit, KeyboardInterrupt):
            pass

        terminate_process_pid(terminal.pid)
        # terminal.communicate()

    def terminate(self):
        '''
        Terminate the started daemon
        '''
        log.info('Terminating %s %s DAEMON', self.display_name, self.__class__.__name__)
        self._running.clear()
        self._connectable.clear()
        time.sleep(0.0125)
        terminate_process_pid(self._process.pid)
        # self._process.join()
        log.info('%s %s DAEMON terminated', self.display_name, self.__class__.__name__)

    def wait_until_running(self, timeout=None):
        '''
        Blocking call to wait for the daemon to start listening
        '''
        if self._connectable.is_set():
            return True
        try:
            return self.io_loop.run_sync(self._wait_until_running, timeout=timeout)
        except ioloop.TimeoutError:
            return False

    @gen.coroutine
    def _wait_until_running(self):
        '''
        The actual, coroutine aware, call to wait for the daemon to start listening
        '''
        check_ports = self.get_check_ports()
        log.debug(
            '%s is checking the following ports to assure running status: %s',
            self.__class__.__name__,
            check_ports
        )
        while self._running.is_set():
            if not check_ports:
                self._connectable.set()
                break
            for port in set(check_ports):
                if isinstance(port, int):
                    log.trace('Checking connectable status on port: %s', port)
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    conn = sock.connect_ex(('localhost', port))
                    if conn == 0:
                        log.debug('Port %s is connectable!', port)
                        check_ports.remove(port)
                        try:
                            sock.shutdown(socket.SHUT_RDWR)
                            sock.close()
                        except socket.error as exc:
                            if not sys.platform.startswith('darwin'):
                                raise
                            try:
                                if exc.errno != errno.ENOTCONN:
                                    raise
                            except AttributeError:
                                # This is not OSX !?
                                pass
                    del sock
                elif isinstance(port, str):
                    joined = self.run_run('manage.joined', config_dir=self.config_dir)
                    joined = [x.lstrip('- ') for x in joined]
                    if port in joined:
                        check_ports.remove(port)
            yield gen.sleep(0.125)
        # A final sleep to allow the ioloop to do other things
        yield gen.sleep(0.125)
        log.info('All ports checked. %s running!', self.cli_script_name)
        raise gen.Return(self._connectable.is_set())

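
# Sketch of how ``TestDaemon`` drives these wrappers (mirrors ``start_zeromq_daemons``
# further below; the ``master_opts`` variable is illustrative):
#
#     master = SaltMaster(master_opts, TMP_CONF_DIR, SCRIPT_DIR)
#     master.display_name = 'salt-master'
#     master.start()                             # spawn the generated CLI script
#     if not master.wait_until_running(timeout=15):
#         log.warning('salt-master did not become reachable in time')
#     ...
#     master.terminate()                         # tear the whole process tree down
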
class SaltMinion(SaltDaemonScriptBase):
    '''
    Class which runs the salt-minion daemon
    '''

    cli_script_name = 'salt-minion'

    def get_script_args(self):
        script_args = ['-l', 'quiet']
        if salt.utils.is_windows() is False:
            script_args.append('--disable-keepalive')
        return script_args

    def get_check_ports(self):
        if salt.utils.is_windows():
            return set([self.config['tcp_pub_port'],
                        self.config['tcp_pull_port']])
        else:
            return set([self.config['id']])

class SaltMaster(SaltDaemonScriptBase):
    '''
    Class which runs the salt-master daemon
    '''

    cli_script_name = 'salt-master'

    def get_check_ports(self):
        #return set([self.config['runtests_conn_check_port']])
        return set([self.config['ret_port'],
                    self.config['publish_port']])
        # Disabled along with Pytest config until fixed.
        # self.config['runtests_conn_check_port']])

    def get_script_args(self):
        #return ['-l', 'debug']
        return ['-l', 'quiet']

class SaltSyndic(SaltDaemonScriptBase):
    '''
    Class which runs the salt-syndic daemon
    '''

    cli_script_name = 'salt-syndic'

    def get_script_args(self):
        #return ['-l', 'debug']
        return ['-l', 'quiet']

    def get_check_ports(self):
        return set()

class TestDaemon(object):
    '''
    Set up the master and minion daemons, and run related cases
    '''
    MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120

    def __init__(self, parser):
        self.parser = parser
        self.colors = salt.utils.get_colors(self.parser.options.no_colors is False)
        if salt.utils.is_windows():
            # There's no shell color support on windows...
            for key in self.colors:
                self.colors[key] = ''

    def __enter__(self):
        '''
        Start a master and minion
        '''
        # Setup the multiprocessing logging queue listener
        salt_log_setup.setup_multiprocessing_logging_listener(
            self.master_opts
        )

        # Set up PATH to mockbin
        self._enter_mockbin()

        if self.parser.options.transport == 'zeromq':
            self.start_zeromq_daemons()
        elif self.parser.options.transport == 'raet':
            self.start_raet_daemons()
        elif self.parser.options.transport == 'tcp':
            self.start_tcp_daemons()

        self.minion_targets = set(['minion', 'sub_minion'])
        self.pre_setup_minions()
        self.setup_minions()

        if getattr(self.parser.options, 'ssh', False):
            self.prep_ssh()

        if self.parser.options.sysinfo:
            try:
                print_header(
                    '~~~~~~~ Versions Report ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Versions Report ', inline=True)

            print('\n'.join(salt.version.versions_report()))

            try:
                print_header(
                    '~~~~~~~ Minion Grains Information ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Minion Grains Information ', inline=True)

            grains = self.client.cmd('minion', 'grains.items')

            minion_opts = self.minion_opts.copy()
            minion_opts['color'] = self.parser.options.no_colors is False
            salt.output.display_output(grains, 'grains', minion_opts)

        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('', sep='=', inline=True)

        try:
            return self
        finally:
            self.post_setup_minions()

    def start_daemon(self, cls, opts, start_fun):
        def start(cls, opts, start_fun):
            salt.utils.appendproctitle('{0}-{1}'.format(self.__class__.__name__, cls.__name__))
            daemon = cls(opts)
            getattr(daemon, start_fun)()
        process = multiprocessing.Process(target=start,
                                          args=(cls, opts, start_fun))
        process.start()
        return process

    def start_zeromq_daemons(self):
        '''
        Fire up the daemons used for zeromq tests
        '''
        self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
        self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
        self.log_server_process.daemon = True
        self.log_server_process.start()

        self.master_process = SaltMaster(self.master_opts, TMP_CONF_DIR, SCRIPT_DIR)
        self.master_process.display_name = 'salt-master'
        self.minion_process = SaltMinion(self.minion_opts, TMP_CONF_DIR, SCRIPT_DIR)
        self.minion_process.display_name = 'salt-minion'
        self.sub_minion_process = SaltMinion(self.sub_minion_opts, TMP_SUB_MINION_CONF_DIR, SCRIPT_DIR)
        self.sub_minion_process.display_name = 'sub salt-minion'
        self.smaster_process = SaltMaster(self.syndic_master_opts, TMP_SYNDIC_MASTER_CONF_DIR, SCRIPT_DIR)
        self.smaster_process.display_name = 'syndic salt-master'
        self.syndic_process = SaltSyndic(self.syndic_opts, TMP_SYNDIC_MINION_CONF_DIR, SCRIPT_DIR)
        self.syndic_process.display_name = 'salt-syndic'
        for process in (self.master_process, self.minion_process, self.sub_minion_process,
                        self.smaster_process, self.syndic_process):
            sys.stdout.write(
                ' * {LIGHT_YELLOW}Starting {0} ... {ENDC}'.format(
                    process.display_name,
                    **self.colors
                )
            )
            sys.stdout.flush()
            process.start()
            process.wait_until_running(timeout=15)
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns', PNUM)
                )
            )
            sys.stdout.write(
                ' * {LIGHT_GREEN}Starting {0} ... STARTED!\n{ENDC}'.format(
                    process.display_name,
                    **self.colors
                )
            )
            sys.stdout.flush()

    def start_raet_daemons(self):
        '''
        Fire up the raet daemons!
        '''
        import salt.daemons.flo
        self.master_process = self.start_daemon(salt.daemons.flo.IofloMaster,
                                                self.master_opts,
                                                'start')

        self.minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                                self.minion_opts,
                                                'tune_in')

        self.sub_minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                                    self.sub_minion_opts,
                                                    'tune_in')
        # Wait for the daemons to all spin up
        time.sleep(5)

        # self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
        #                                          self.syndic_master_opts,
        #                                          'start')

        # no raet syndic daemon yet

    start_tcp_daemons = start_zeromq_daemons

    def prep_ssh(self):
        '''
        Generate keys and start an ssh daemon on an alternate port
        '''
        sys.stdout.write(
            ' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
                'SSH server',
                **self.colors
            )
        )
        keygen = salt.utils.which('ssh-keygen')
        sshd = salt.utils.which('sshd')

        if not (keygen and sshd):
            print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
            return
        if not os.path.exists(TMP_CONF_DIR):
            os.makedirs(TMP_CONF_DIR)

        # Generate client key
        pub_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
        priv_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test')
        if os.path.exists(pub_key_test_file):
            os.remove(pub_key_test_file)
        if os.path.exists(priv_key_test_file):
            os.remove(priv_key_test_file)
        keygen_process = subprocess.Popen(
            [keygen, '-t',
             'ecdsa',
             '-b',
             '521',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'key_test',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=TMP_CONF_DIR
        )
        _, keygen_err = keygen_process.communicate()
        if keygen_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_err)))
        sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
        shutil.copy(sshd_config_path, TMP_CONF_DIR)
        auth_key_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')

        # Generate server key
        server_key_dir = os.path.join(TMP_CONF_DIR, 'server')
        if not os.path.exists(server_key_dir):
            os.makedirs(server_key_dir)
        server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
        server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
        server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
        server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
        server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
        server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host.ed25519_key.pub')

        for server_key_file in (server_dsa_priv_key_file,
                                server_dsa_pub_key_file,
                                server_ecdsa_priv_key_file,
                                server_ecdsa_pub_key_file,
                                server_ed25519_priv_key_file,
                                server_ed25519_pub_key_file):
            if os.path.exists(server_key_file):
                os.remove(server_key_file)

        keygen_process_dsa = subprocess.Popen(
            [keygen, '-t',
             'dsa',
             '-b',
             '1024',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'ssh_host_dsa_key',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=server_key_dir
        )
        _, keygen_dsa_err = keygen_process_dsa.communicate()
        if keygen_dsa_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_dsa_err)))

        keygen_process_ecdsa = subprocess.Popen(
            [keygen, '-t',
             'ecdsa',
             '-b',
             '521',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'ssh_host_ecdsa_key',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=server_key_dir
        )
        _, keygen_escda_err = keygen_process_ecdsa.communicate()
        if keygen_escda_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_escda_err)))

        keygen_process_ed25519 = subprocess.Popen(
            [keygen, '-t',
             'ed25519',
             '-b',
             '521',
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             'ssh_host_ed25519_key',
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=server_key_dir
        )
        _, keygen_ed25519_err = keygen_process_ed25519.communicate()
        if keygen_ed25519_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_ed25519_err)))

        with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
            ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
            if not keygen_dsa_err:
                ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
            if not keygen_escda_err:
                ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
            if not keygen_ed25519_err:
                ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))

        self.sshd_pidfile = os.path.join(TMP_CONF_DIR, 'sshd.pid')
        self.sshd_process = subprocess.Popen(
            [sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=TMP_CONF_DIR
        )
        _, sshd_err = self.sshd_process.communicate()
        if sshd_err:
            print('sshd had errors on startup: {0}'.format(salt.utils.to_str(sshd_err)))
        else:
            os.environ['SSH_DAEMON_RUNNING'] = 'True'
        roster_path = os.path.join(FILES, 'conf/_ssh/roster')
        shutil.copy(roster_path, TMP_CONF_DIR)
        if salt.utils.is_windows():
            with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
                roster.write('  user: {0}\n'.format(win32api.GetUserName()))
                roster.write('  priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
        else:
            with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
                roster.write('  user: {0}\n'.format(pwd.getpwuid(os.getuid()).pw_name))
                roster.write('  priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
        sys.stdout.write(
            ' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
                **self.colors
            )
        )

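
    # After ``prep_ssh`` the roster copied from ``files/conf/_ssh/roster`` gains the
    # running user and the generated client key, so the effective entry looks roughly
    # like the following (host/port come from the shipped roster and are omitted here):
    #
    #     localhost:
    #       ...
    #       user: <user running the test suite>
    #       priv: <TMP_CONF_DIR>/key_test
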
    @classmethod
    def config(cls, role):
        '''
        Return a configuration for a master/minion/syndic.

        Currently these roles are:
            * master
            * minion
            * syndic
            * syndic_master
            * sub_minion
        '''
        return RUNTIME_CONFIGS[role]

    @classmethod
    def config_location(cls):
        return TMP_CONF_DIR

    @property
    def client(self):
        '''
        Return a local client which will be used for example to ping and sync
        the test minions.

        This client is defined as a class attribute because its creation needs
        to be deferred to a later stage. If it were created on `__enter__` as it
        previously was, it would not receive the master events.
        '''
        if 'runtime_client' not in RUNTIME_CONFIGS:
            RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
                mopts=self.master_opts
            )
        return RUNTIME_CONFIGS['runtime_client']

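
    # Sketch of how test cases consume the accessors above (illustrative):
    #
    #     master_opts = TestDaemon.config('master')      # opts stored in RUNTIME_CONFIGS
    #     conf_dir = TestDaemon.config_location()        # i.e. TMP_CONF_DIR
    #
    # and, inside a running ``with TestDaemon(parser) as daemon:`` block,
    # ``daemon.client`` is a cached local client built from the master opts:
    #
    #     ret = daemon.client.cmd('minion', 'test.ping')
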
@classmethod
|
|
|
|
def transplant_configs(cls, transport='zeromq'):
|
|
|
|
if os.path.isdir(TMP_CONF_DIR):
|
|
|
|
shutil.rmtree(TMP_CONF_DIR)
|
|
|
|
os.makedirs(TMP_CONF_DIR)
|
2016-05-08 18:39:57 +00:00
|
|
|
os.makedirs(TMP_SUB_MINION_CONF_DIR)
|
|
|
|
os.makedirs(TMP_SYNDIC_MASTER_CONF_DIR)
|
2016-05-20 13:45:09 +00:00
|
|
|
os.makedirs(TMP_SYNDIC_MINION_CONF_DIR)
|
2015-08-27 04:26:07 +00:00
|
|
|
print(' * Transplanting configuration files to \'{0}\''.format(TMP_CONF_DIR))
|
2015-11-02 23:15:02 +00:00
|
|
|
if salt.utils.is_windows():
|
|
|
|
running_tests_user = win32api.GetUserName()
|
|
|
|
else:
|
|
|
|
running_tests_user = pwd.getpwuid(os.getuid()).pw_name
|
2016-06-22 23:56:50 +00:00
|
|
|
|
2016-05-09 17:57:13 +00:00
|
|
|
tests_known_hosts_file = os.path.join(TMP_CONF_DIR, 'salt_ssh_known_hosts')
|
|
|
|
with salt.utils.fopen(tests_known_hosts_file, 'w') as known_hosts:
|
2014-10-30 06:45:52 +00:00
|
|
|
known_hosts.write('')
|
2016-06-22 23:56:50 +00:00
|
|
|
|
2016-07-08 22:14:48 +00:00
|
|
|
        # This master connects to syndic_master via a syndic
        master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
        master_opts['known_hosts_file'] = tests_known_hosts_file
        master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
        master_opts['user'] = running_tests_user
        master_opts['config_dir'] = TMP_CONF_DIR
        master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
        master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')

        # This is the syndic for master
        # Let's start with a copy of the syndic master configuration
        syndic_opts = copy.deepcopy(master_opts)
        # Let's update with the syndic configuration
        syndic_opts.update(salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic')))
        syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
        syndic_opts['config_dir'] = TMP_SYNDIC_MINION_CONF_DIR

        # This minion connects to master
        minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'minion'))
        minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
        minion_opts['user'] = running_tests_user
        minion_opts['config_dir'] = TMP_CONF_DIR
        minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
        minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
        minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
        minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')

        # This sub_minion also connects to master
        sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
        sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
        sub_minion_opts['user'] = running_tests_user
        sub_minion_opts['config_dir'] = TMP_SUB_MINION_CONF_DIR
        sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
        sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
        sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
        sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')

        # This is the master of masters
        syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
        syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
        syndic_master_opts['user'] = running_tests_user
        syndic_master_opts['config_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
        syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
        syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')

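        # NOTE: every daemon is pointed at its own root_dir/pki_dir/cachedir
        # underneath TMP, so a test run never reads from or writes to the
        # host's real Salt configuration. Illustrative layout:
        #   <TMP>/rootdir/pki/master         <- master keys
        #   <TMP>/rootdir-sub-minion/cache   <- sub_minion job cache
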
        if transport == 'raet':
            master_opts['transport'] = 'raet'
            master_opts['raet_port'] = 64506
            minion_opts['transport'] = 'raet'
            minion_opts['raet_port'] = 64510
            sub_minion_opts['transport'] = 'raet'
            sub_minion_opts['raet_port'] = 64520
            # syndic_master_opts['transport'] = 'raet'

        if transport == 'tcp':
            master_opts['transport'] = 'tcp'
            minion_opts['transport'] = 'tcp'
            sub_minion_opts['transport'] = 'tcp'
            syndic_master_opts['transport'] = 'tcp'

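        # NOTE: when ``transport`` is neither 'raet' nor 'tcp' the config
        # files keep their default transport (ZeroMQ). The value is normally
        # supplied by the test runner; something along the lines of
        # (illustrative):
        #   python tests/runtests.py --transport=tcp
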
        # Set up config options that require internal data
        master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
            'base': [os.path.join(FILES, 'pillar', 'base')]
        }
        master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
            'base': [
                os.path.join(FILES, 'file', 'base'),
                # Let's support runtime created files that can be used like:
                #   salt://my-temp-file.txt
                TMP_STATE_TREE
            ],
            # Alternate root to test __env__ choices
            'prod': [
                os.path.join(FILES, 'file', 'prod'),
                TMP_PRODENV_STATE_TREE
            ]
        }
        for opts_dict in (master_opts, syndic_master_opts):
            if 'ext_pillar' not in opts_dict:
                opts_dict['ext_pillar'] = []
            if salt.utils.is_windows():
                opts_dict['ext_pillar'].append(
                    {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
            else:
                opts_dict['ext_pillar'].append(
                    {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})

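        # NOTE: ``cmd_yaml`` is the external pillar that runs the configured
        # command and parses its stdout as YAML, so the contents of
        # FILES/ext.yaml end up merged into the pillar of every test minion.
        # ``type`` is used on Windows because ``cat`` is not available there.
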
        for opts_dict in (master_opts, syndic_master_opts):
            # We need to copy the extension modules into the new master root_dir or
            # it will be prefixed by it
            new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
            if not os.path.exists(new_extension_modules_path):
                shutil.copytree(
                    os.path.join(
                        INTEGRATION_TEST_DIR, 'files', 'extension_modules'
                    ),
                    new_extension_modules_path
                )
            opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')

        # Point the config values to the correct temporary paths
        for name in ('hosts', 'aliases'):
            optname = '{0}.file'.format(name)
            optname_path = os.path.join(TMP, name)
            master_opts[optname] = optname_path
            minion_opts[optname] = optname_path
            sub_minion_opts[optname] = optname_path
            syndic_opts[optname] = optname_path
            syndic_master_opts[optname] = optname_path

        for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts):
            if 'log_handlers_dirs' not in conf:
                conf['log_handlers_dirs'] = []
            conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
            conf['runtests_log_port'] = SALT_LOG_PORT

        # ----- Transcribe Configuration ---------------------------------------------------------------------------->
        for entry in os.listdir(CONF_DIR):
            if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
                # These have runtime computed values and will be handled
                # differently
                continue
            entry_path = os.path.join(CONF_DIR, entry)
            if os.path.isfile(entry_path):
                shutil.copy(
                    entry_path,
                    os.path.join(TMP_CONF_DIR, entry)
                )
            elif os.path.isdir(entry_path):
                shutil.copytree(
                    entry_path,
                    os.path.join(TMP_CONF_DIR, entry)
                )

        for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
            computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
            with salt.utils.fopen(os.path.join(TMP_CONF_DIR, entry), 'w') as fp_:
                fp_.write(yaml.dump(computed_config, default_flow_style=False))
        sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
        salt.utils.fopen(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'), 'w').write(
            yaml.dump(sub_minion_computed_config, default_flow_style=False)
        )
        shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SUB_MINION_CONF_DIR, 'master'))

        syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
        salt.utils.fopen(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w').write(
            yaml.dump(syndic_master_computed_config, default_flow_style=False)
        )
        syndic_computed_config = copy.deepcopy(syndic_opts)
        salt.utils.fopen(os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w').write(
            yaml.dump(syndic_computed_config, default_flow_style=False)
        )
        shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'))
        # <---- Transcribe Configuration -----------------------------------------------------------------------------

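        # NOTE: the computed option dictionaries are serialized back to YAML
        # so that the daemons spawned later by this suite read exactly the
        # values computed above from their respective config directories
        # (TMP_CONF_DIR, TMP_SUB_MINION_CONF_DIR, ...).
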
        # ----- Verify Environment ---------------------------------------------------------------------------------->
        master_opts = salt.config.master_config(os.path.join(TMP_CONF_DIR, 'master'))
        minion_opts = salt.config.minion_config(os.path.join(TMP_CONF_DIR, 'minion'))
        syndic_opts = salt.config.syndic_config(
            os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'),
            os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
        )
        sub_minion_opts = salt.config.minion_config(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'))
        syndic_master_opts = salt.config.master_config(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'))

        RUNTIME_CONFIGS['master'] = freeze(master_opts)
        RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
        RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
        RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
        RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)

        verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
                    os.path.join(master_opts['pki_dir'], 'minions_pre'),
                    os.path.join(master_opts['pki_dir'], 'minions_rejected'),
                    os.path.join(master_opts['pki_dir'], 'minions_denied'),
                    os.path.join(master_opts['cachedir'], 'jobs'),
                    os.path.join(master_opts['cachedir'], 'raet'),
                    os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
                    os.path.join(syndic_master_opts['pki_dir'], 'minions'),
                    os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
                    os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
                    os.path.join(syndic_master_opts['cachedir'], 'jobs'),
                    os.path.join(syndic_master_opts['cachedir'], 'raet'),
                    os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
                    os.path.join(master_opts['pki_dir'], 'accepted'),
                    os.path.join(master_opts['pki_dir'], 'rejected'),
                    os.path.join(master_opts['pki_dir'], 'pending'),
                    os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
                    os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
                    os.path.join(syndic_master_opts['pki_dir'], 'pending'),
                    os.path.join(syndic_master_opts['cachedir'], 'raet'),

                    os.path.join(minion_opts['pki_dir'], 'accepted'),
                    os.path.join(minion_opts['pki_dir'], 'rejected'),
                    os.path.join(minion_opts['pki_dir'], 'pending'),
                    os.path.join(minion_opts['cachedir'], 'raet'),
                    os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
                    os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
                    os.path.join(sub_minion_opts['pki_dir'], 'pending'),
                    os.path.join(sub_minion_opts['cachedir'], 'raet'),
                    os.path.dirname(master_opts['log_file']),
                    minion_opts['extension_modules'],
                    sub_minion_opts['extension_modules'],
                    sub_minion_opts['pki_dir'],
                    master_opts['sock_dir'],
                    syndic_master_opts['sock_dir'],
                    sub_minion_opts['sock_dir'],
                    minion_opts['sock_dir'],
                    TMP_STATE_TREE,
                    TMP_PRODENV_STATE_TREE,
                    TMP,
                    ],
                   running_tests_user)

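        # NOTE: verify_env() pre-creates the directory tree listed above and
        # makes sure it is usable by ``running_tests_user`` before any daemon
        # starts, so key acceptance, job caching and socket creation do not
        # fail on missing paths or wrong ownership.
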
        cls.master_opts = master_opts
        cls.minion_opts = minion_opts
        cls.sub_minion_opts = sub_minion_opts
        cls.syndic_opts = syndic_opts
        cls.syndic_master_opts = syndic_master_opts
        # <---- Verify Environment -----------------------------------------------------------------------------------

    def __exit__(self, type, value, traceback):
        '''
        Kill the minion and master processes
        '''
        self.sub_minion_process.terminate()
        self.minion_process.terminate()
        self.master_process.terminate()
        try:
            self.syndic_process.terminate()
        except AttributeError:
            pass
        try:
            self.smaster_process.terminate()
        except AttributeError:
            pass
        #salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50)
        #self.sub_minion_process.join()
        #salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50)
        #self.minion_process.join()
        #salt.utils.process.clean_proc(self.master_process, wait_for_kill=50)
        #self.master_process.join()
        #try:
        #    salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50)
        #    self.syndic_process.join()
        #except AttributeError:
        #    pass
        #try:
        #    salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50)
        #    self.smaster_process.join()
        #except AttributeError:
        #    pass
        self.log_server.server_close()
        self.log_server.shutdown()
        self._exit_mockbin()
        self._exit_ssh()
        self.log_server_process.join()
        # Shutdown the multiprocessing logging queue listener
        salt_log_setup.shutdown_multiprocessing_logging()
        salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)

    def pre_setup_minions(self):
        '''
        Subclass this method for additional minion setups.
        '''

    def setup_minions(self):
        '''
        Minions setup routines
        '''

    def post_setup_minions(self):
        '''
        Subclass this method to execute code after the minions have been setup
        '''

    def _enter_mockbin(self):
        path = os.environ.get('PATH', '')
        path_items = path.split(os.pathsep)
        if MOCKBIN not in path_items:
            path_items.insert(0, MOCKBIN)
        os.environ['PATH'] = os.pathsep.join(path_items)

    def _exit_ssh(self):
        if hasattr(self, 'sshd_process'):
            try:
                self.sshd_process.kill()
            except OSError as exc:
                if exc.errno != 3:
                    raise
            with salt.utils.fopen(self.sshd_pidfile) as fhr:
                try:
                    os.kill(int(fhr.read()), signal.SIGKILL)
                except OSError as exc:
                    if exc.errno != 3:
                        raise

    def _exit_mockbin(self):
        path = os.environ.get('PATH', '')
        path_items = path.split(os.pathsep)
        try:
            path_items.remove(MOCKBIN)
        except ValueError:
            pass
        os.environ['PATH'] = os.pathsep.join(path_items)

    @classmethod
    def clean(cls):
        '''
        Clean out the tmp files
        '''
        def remove_readonly(func, path, excinfo):
            os.chmod(path, stat.S_IWRITE)
            func(path)

        for dirname in (TMP, TMP_STATE_TREE, TMP_PRODENV_STATE_TREE):
            if os.path.isdir(dirname):
                shutil.rmtree(dirname, onerror=remove_readonly)

    def wait_for_jid(self, targets, jid, timeout=120):
        time.sleep(1)  # Allow some time for minions to accept jobs
        now = datetime.now()
        expire = now + timedelta(seconds=timeout)
        job_finished = False
        while now <= expire:
            running = self.__client_job_running(targets, jid)
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns', PNUM)
                )
            )
            if not running and job_finished is False:
                # Let's not have false positives and wait one more second
                job_finished = True
            elif not running and job_finished is True:
                return True
            elif running and job_finished is True:
                job_finished = False

            if job_finished is False:
                sys.stdout.write(
                    ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                        '{0}'.format(expire - now).rsplit('.', 1)[0],
                        ', '.join(running),
                        **self.colors
                    )
                )
                sys.stdout.flush()
            time.sleep(1)
            now = datetime.now()
        else:  # pylint: disable=W0120
            sys.stdout.write(
                '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
                'back\n'.format(**self.colors)
            )
            sys.stdout.flush()
        return False

    def __client_job_running(self, targets, jid):
        running = self.client.cmd(
            list(targets), 'saltutil.running', tgt_type='list'
        )
        return [
            k for (k, v) in six.iteritems(running) if v and v[0]['jid'] == jid
        ]

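    # NOTE: saltutil.running returns, per responding minion, a list of the
    # jobs it is currently executing; __client_job_running() keeps only the
    # minion IDs whose first entry still carries the jid being waited for.
    # Illustrative shape of the reply:
    #   {'minion': [{'jid': '20170101120000123456', 'fun': 'state.sls', ...}]}
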
    def wait_for_minion_connections(self, targets, timeout):
        salt.utils.appendproctitle('WaitForMinionConnections')
        sys.stdout.write(
            ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
            'connect back\n'.format(
                (timeout > 60 and
                 timedelta(seconds=timeout) or
                 '{0} secs'.format(timeout)),
                ', '.join(targets),
                **self.colors
            )
        )
        sys.stdout.flush()
        expected_connections = set(targets)
        now = datetime.now()
        expire = now + timedelta(seconds=timeout)
        while now <= expire:
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns', PNUM)
                )
            )
            sys.stdout.write(
                ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(expected_connections),
                    **self.colors
                )
            )
            sys.stdout.flush()

            try:
                responses = self.client.cmd(
                    list(expected_connections), 'test.ping', tgt_type='list',
                )
            # we'll get this exception if the master process hasn't finished starting yet
            except SaltClientError:
                time.sleep(0.1)
                now = datetime.now()
                continue
            for target in responses:
                if target not in expected_connections:
                    # Some other minion "listening"?
                    continue
                expected_connections.remove(target)
                sys.stdout.write(
                    '\r{0}\r'.format(
                        ' ' * getattr(self.parser.options, 'output_columns',
                                      PNUM)
                    )
                )
                sys.stdout.write(
                    ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
                        target, **self.colors
                    )
                )
                sys.stdout.flush()

            if not expected_connections:
                return

            time.sleep(1)
            now = datetime.now()
        else:  # pylint: disable=W0120
            print(
                '\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect '
                'back. Tests requiring them WILL fail'.format(**self.colors)
            )
            try:
                print_header(
                    '=', sep='=', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('=', sep='=', inline=True)
            raise SystemExit()

    def sync_minion_modules_(self, modules_kind, targets, timeout=None):
        if not timeout:
            timeout = 120
        # Let's sync all connected minions
        print(
            ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
            '(saltutil.sync_{1})'.format(
                ', '.join(targets),
                modules_kind,
                **self.colors
            )
        )
        syncing = set(targets)
        jid_info = self.client.run_job(
            list(targets), 'saltutil.sync_{0}'.format(modules_kind),
            tgt_type='list',
            timeout=999999999999999,
        )

        if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
            print(
                ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
                'Tests requiring these {0} WILL fail'.format(
                    modules_kind, **self.colors)
            )
            raise SystemExit()

        while syncing:
            rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
            if rdata:
                for name, output in six.iteritems(rdata):
                    if not output['ret']:
                        # Already synced!?
                        syncing.remove(name)
                        continue

                    if isinstance(output['ret'], six.string_types):
                        # An error has occurred
                        print(
                            ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                            '{1}'.format(
                                name, output['ret'],
                                modules_kind,
                                **self.colors)
                        )
                        return False

                    print(
                        ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                        '{1}'.format(
                            name,
                            ', '.join(output['ret']),
                            modules_kind, **self.colors
                        )
                    )
                    # Synced!
                    try:
                        syncing.remove(name)
                    except KeyError:
                        print(
                            ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                            '{1}'.format(name, output, **self.colors)
                        )
        return True

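    # The thin wrappers below pick the kind of module to sync; a daemon setup
    # routine would typically call them with the connected minion IDs, for
    # example (illustrative):
    #   self.sync_minion_states(('minion', 'sub_minion'))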
    def sync_minion_states(self, targets, timeout=None):
        salt.utils.appendproctitle('SyncMinionStates')
        self.sync_minion_modules_('states', targets, timeout=timeout)

    def sync_minion_modules(self, targets, timeout=None):
        salt.utils.appendproctitle('SyncMinionModules')
        self.sync_minion_modules_('modules', targets, timeout=timeout)

    def sync_minion_grains(self, targets, timeout=None):
        self.sync_minion_modules_('grains', targets, timeout=timeout)

class AdaptedConfigurationTestCaseMixIn(object):

    __slots__ = ()

    def get_config(self, config_for, from_scratch=False):
        if from_scratch:
            if config_for in ('master', 'syndic_master'):
                return salt.config.master_config(self.get_config_file_path(config_for))
            elif config_for in ('minion', 'sub_minion'):
                return salt.config.minion_config(self.get_config_file_path(config_for))
            elif config_for in ('syndic',):
                return salt.config.syndic_config(
                    self.get_config_file_path(config_for),
                    self.get_config_file_path('minion')
                )
            elif config_for == 'client_config':
                return salt.config.client_config(self.get_config_file_path('master'))

        if config_for not in RUNTIME_CONFIGS:
            if config_for in ('master', 'syndic_master'):
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.master_config(self.get_config_file_path(config_for))
                )
            elif config_for in ('minion', 'sub_minion'):
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.minion_config(self.get_config_file_path(config_for))
                )
            elif config_for in ('syndic',):
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.syndic_config(
                        self.get_config_file_path(config_for),
                        self.get_config_file_path('minion')
                    )
                )
            elif config_for == 'client_config':
                RUNTIME_CONFIGS[config_for] = freeze(
                    salt.config.client_config(self.get_config_file_path('master'))
                )
        return RUNTIME_CONFIGS[config_for]

    def get_config_dir(self):
        return TMP_CONF_DIR

    def get_config_file_path(self, filename):
        if filename == 'syndic_master':
            return os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master')
        if filename == 'syndic':
            return os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion')
        if filename == 'sub_minion':
            return os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion')
        return os.path.join(TMP_CONF_DIR, filename)

    @property
    def master_opts(self):
        '''
        Return the options used for the master
        '''
        return self.get_config('master')

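# NOTE: get_config() above returns frozen (immutable) copies cached in
# RUNTIME_CONFIGS; pass ``from_scratch=True`` to read a fresh, mutable
# dictionary from disk when a test needs to tweak options, e.g.
# (illustrative):
#   opts = self.get_config('minion', from_scratch=True)
#   opts['timeout'] = 60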

class SaltMinionEventAssertsMixIn(object):
    '''
    Asserts to verify that a given event was seen
    '''

    def __new__(cls, *args, **kwargs):
        # We have to cross-call to re-gen a config
        cls.q = multiprocessing.Queue()
        cls.fetch_proc = multiprocessing.Process(target=cls._fetch, args=(cls.q,))
        cls.fetch_proc.start()
        return object.__new__(cls)

    def __exit__(self, *args, **kwargs):
        self.fetch_proc.join()

    @staticmethod
    def _fetch(q):
        '''
        Collect events and store them
        '''
        def _clean_queue():
            print('Cleaning queue!')
            while not q.empty():
                queue_item = q.get()
                queue_item.task_done()

        atexit.register(_clean_queue)
        a_config = AdaptedConfigurationTestCaseMixIn()
        event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
        while True:
            try:
                events = event.get_event(full=False)
            except Exception:
                # This is broad but we'll see all kinds of issues right now
                # if we drop the proc out from under the socket while we're reading
                pass
            q.put(events)

    def assertMinionEventFired(self, tag):
        # TODO
        raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')

    def assertMinionEventReceived(self, desired_event):
        queue_wait = 5  # 2.5s
        while self.q.empty():
            time.sleep(0.5)  # Wait for events to be pushed into the queue
            queue_wait -= 1
            if queue_wait <= 0:
                raise AssertionError('Queue wait timer expired')
        while not self.q.empty():  # This is not thread-safe and may be inaccurate
            event = self.q.get()
            if isinstance(event, dict):
                event.pop('_stamp')
            if desired_event == event:
                self.fetch_proc.terminate()
                return True
        self.fetch_proc.terminate()
        raise AssertionError('Event {0} was not received by minion'.format(desired_event))

class SaltClientTestCaseMixIn(AdaptedConfigurationTestCaseMixIn):

    _salt_client_config_file_name_ = 'master'
    __slots__ = ()

    @property
    def client(self):
        if 'runtime_client' not in RUNTIME_CONFIGS:
            RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
                mopts=self.get_config(self._salt_client_config_file_name_, from_scratch=True)
            )
        return RUNTIME_CONFIGS['runtime_client']


class ModuleCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a module function
    '''

    def runTest(self):
        '''
        TODO remove after salt-testing PR #74 is merged and deployed
        '''
        try:
            super(ModuleCase, self).runTest()
        except AttributeError:
            log.error('ModuleCase runTest() could not execute. Requires at least v2016.8.3 of '
                      'salt-testing package')

    def minion_run(self, _function, *args, **kw):
        '''
        Run a single salt function on the 'minion' target and condition
        the return down to match the behavior of the raw function call
        '''
        return self.run_function(_function, args, **kw)

    def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
                     **kwargs):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        know_to_return_none = (
            'file.chown', 'file.chgrp', 'ssh.recv_known_host'
        )
        orig = self.client.cmd(
            minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
        )

        if minion_tgt not in orig:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion \'{0}\'. Command output: {1}'.format(
                    minion_tgt, orig
                )
            )
        elif orig[minion_tgt] is None and function not in know_to_return_none:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
                'the minion \'{1}\'. Command output: {2}'.format(
                    function, minion_tgt, orig
                )
            )

        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(
            orig[minion_tgt]
        )

        return orig[minion_tgt]

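    # Example usage of run_function() (illustrative): the return is already
    # narrowed down to this minion's value, so a module call reads naturally:
    #   ret = self.run_function('test.ping')
    #   self.assertTrue(ret)
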
    def run_state(self, function, **kwargs):
        '''
        Run the state.single command and return the state return structure
        '''
        ret = self.run_function('state.single', [function], **kwargs)
        return self._check_state_return(ret)

    @property
    def minion_opts(self):
        '''
        Return the options used for the minion
        '''
        return self.get_config('minion')

    @property
    def sub_minion_opts(self):
        '''
        Return the options used for the sub_minion
        '''
        return self.get_config('sub_minion')

    def _check_state_return(self, ret):
        if isinstance(ret, dict):
            # This is the supposed return format for state calls
            return ret

        if isinstance(ret, list):
            jids = []
            # These are usually errors
            for item in ret[:]:
                if not isinstance(item, six.string_types):
                    # We don't know how to handle this
                    continue
                match = STATE_FUNCTION_RUNNING_RE.match(item)
                if not match:
                    # We don't know how to handle this
                    continue
                jid = match.group('jid')
                if jid in jids:
                    continue

                jids.append(jid)

                job_data = self.run_function(
                    'saltutil.find_job', [jid]
                )
                job_kill = self.run_function('saltutil.kill_job', [jid])
                msg = (
                    'A running state.single was found causing a state lock. '
                    'Job details: \'{0}\' Killing Job Returned: \'{1}\''.format(
                        job_data, job_kill
                    )
                )
                ret.append('[TEST SUITE ENFORCED]{0}'
                           '[/TEST SUITE ENFORCED]'.format(msg))
        return ret

class SyndicCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a syndic based execution test
    '''
    _salt_client_config_file_name_ = 'syndic_master'

    def run_function(self, function, arg=()):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        orig = self.client.cmd('minion', function, arg, timeout=25)
        if 'minion' not in orig:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion. Command output: {0}'.format(orig)
            )
        return orig['minion']

class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase, ScriptPathMixin):
    '''
    Execute a test for a shell command
    '''

    _code_dir_ = CODE_DIR
    _script_dir_ = SCRIPT_DIR
    _python_executable_ = PYEXEC

    def chdir(self, dirname):
        try:
            os.chdir(dirname)
        except OSError:
            os.chdir(INTEGRATION_TEST_DIR)

    def run_salt(self, arg_str, with_retcode=False, catch_stderr=False, timeout=30):  # pylint: disable=W0221
        '''
        Execute salt
        '''
        arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)

    def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False, timeout=25):  # pylint: disable=W0221
        '''
        Execute salt-ssh
        '''
        arg_str = '-ldebug -W -c {0} -i --priv {1} --roster-file {2} --out=json localhost {3}'.format(self.get_config_dir(), os.path.join(TMP_CONF_DIR, 'key_test'), os.path.join(TMP_CONF_DIR, 'roster'), arg_str)
        return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout, raw=True)

    def run_run(self, arg_str, with_retcode=False, catch_stderr=False, async=False, timeout=60, config_dir=None):
        '''
        Execute salt-run
        '''
        arg_str = '-c {0}{async_flag} -t {timeout} {1}'.format(config_dir or self.get_config_dir(),
                                                               arg_str,
                                                               timeout=timeout,
                                                               async_flag=' --async' if async else '')
        return self.run_script('salt-run', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=30)

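    # Each run_* helper simply prefixes the command line with the temporary
    # config dir and delegates to run_script(), so a test drives the real CLI,
    # e.g. (illustrative):
    #   data = self.run_salt('-L minion test.ping')
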
    def run_run_plus(self, fun, *arg, **kwargs):
        '''
        Execute the runner function and return the return data and output in a dict
        '''
        ret = {'fun': fun}
        from_scratch = bool(kwargs.pop('__reload_config', False))
        # Have to create an empty dict and then update it, as the result from
        # self.get_config() is an ImmutableDict which cannot be updated.
        opts = {}
        opts.update(self.get_config('client_config', from_scratch=from_scratch))
        opts_arg = list(arg)
        if kwargs:
            opts_arg.append({'__kwarg__': True})
            opts_arg[-1].update(kwargs)
        opts.update({'doc': False, 'fun': fun, 'arg': opts_arg})
        with RedirectStdStreams():
            runner = salt.runner.Runner(opts)
            ret['return'] = runner.run()
            try:
                ret['jid'] = runner.jid
            except AttributeError:
                ret['jid'] = None

        # Compile output
        # TODO: Support outputters other than nested
        opts['color'] = False
        opts['output_file'] = cStringIO()
        try:
            salt.output.display_output(ret['return'], opts=opts)
            ret['out'] = opts['output_file'].getvalue().splitlines()
        finally:
            opts['output_file'].close()

        return ret

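    # The dict returned by run_run_plus() carries: 'fun' (runner name),
    # 'return' (raw runner return), 'jid' (when the runner exposes one) and
    # 'out' (the rendered outputter lines). Illustrative usage:
    #   ret = self.run_run_plus('jobs.list_jobs')
    #   self.assertIn('return', ret)
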
    def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
        '''
        Execute salt-key
        '''
        arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script(
            'salt-key',
            arg_str,
            catch_stderr=catch_stderr,
            with_retcode=with_retcode,
            timeout=30
        )

    def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-cp
        '''
        arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt-cp', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=30)

    def run_call(self, arg_str, with_retcode=False, catch_stderr=False):
        '''
        Execute salt-call.
        '''
        arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt-call', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=30)

    def run_cloud(self, arg_str, catch_stderr=False, timeout=15):
        '''
        Execute salt-cloud
        '''
        arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
        return self.run_script('salt-cloud', arg_str, catch_stderr,
                               timeout=timeout)

class ShellCaseCommonTestsMixIn(CheckShellBinaryNameAndVersionMixIn):

    _call_binary_expected_version_ = salt.version.__version__

    def test_salt_with_git_version(self):
        if getattr(self, '_call_binary_', None) is None:
            self.skipTest('\'_call_binary_\' not defined.')
        from salt.utils import which
        from salt.version import __version_info__, SaltStackVersion
        git = which('git')
        if not git:
            self.skipTest('The git binary is not available')

        # Let's get the output of git describe
        process = subprocess.Popen(
            [git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=CODE_DIR
        )
        out, err = process.communicate()
        if process.returncode != 0:
            process = subprocess.Popen(
                [git, 'describe', '--tags', '--match', 'v[0-9]*'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                close_fds=True,
                cwd=CODE_DIR
            )
            out, err = process.communicate()
        if not out:
            self.skipTest(
                'Failed to get the output of \'git describe\'. '
                'Error: \'{0}\''.format(
                    salt.utils.to_str(err)
                )
            )

        parsed_version = SaltStackVersion.parse(out)

        if parsed_version.info < __version_info__:
            self.skipTest(
                'We\'re likely about to release a new version. This test '
                'would fail. Parsed(\'{0}\') < Expected(\'{1}\')'.format(
                    parsed_version.info, __version_info__
                )
            )
        elif parsed_version.info != __version_info__:
            self.skipTest(
                'In order to get the proper salt version with the '
                'git hash you need to update salt\'s local git '
                'tags. Something like: \'git fetch --tags\' or '
                '\'git fetch --tags upstream\' if you followed '
                'salt\'s contribute documentation. The version '
                'string WILL NOT include the git hash.'
            )
        out = '\n'.join(self.run_script(self._call_binary_, '--version'))
        self.assertIn(parsed_version.string, out)

@requires_sshd_server
class SSHCase(ShellCase):
    '''
    Execute a command via salt-ssh
    '''
    def _arg_str(self, function, arg):
        return '{0} {1}'.format(function, ' '.join(arg))

    def run_function(self, function, arg=(), timeout=90, **kwargs):
        '''
        We use a 90s timeout here, which some slower systems do end up needing
        '''
        ret = self.run_ssh(self._arg_str(function, arg), timeout=timeout)
        try:
            return json.loads(ret)['localhost']
        except Exception:
            return ret

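# The mixin below inspects state returns produced by ModuleCase.run_state().
# Typical usage in a state test (illustrative):
#   ret = self.run_state('cmd.run', name='echo hello')
#   self.assertSaltTrueReturn(ret)
#   self.assertInSaltComment('echo hello', ret)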

class SaltReturnAssertsMixIn(object):

    def assertReturnSaltType(self, ret):
        try:
            self.assertTrue(isinstance(ret, dict))
        except AssertionError:
            raise AssertionError(
                '{0} is not dict. Salt returned: {1}'.format(
                    type(ret).__name__, ret
                )
            )

    def assertReturnNonEmptySaltType(self, ret):
        self.assertReturnSaltType(ret)
        try:
            self.assertNotEqual(ret, {})
        except AssertionError:
            raise AssertionError(
                '{0} is equal to {1}. Salt returned an empty dictionary.'.format(ret, {})
            )

Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
def __return_valid_keys(self, keys):
|
2012-12-12 17:11:44 +00:00
|
|
|
if isinstance(keys, tuple):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
# If it's a tuple, turn it into a list
|
2012-12-07 12:46:02 +00:00
|
|
|
keys = list(keys)
|
2014-11-21 19:41:22 +00:00
|
|
|
elif isinstance(keys, six.string_types):
|
|
|
|
# If it's a string, make it a one item list
|
2012-12-07 12:46:02 +00:00
|
|
|
keys = [keys]
|
2012-12-12 17:11:44 +00:00
|
|
|
elif not isinstance(keys, list):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
# If we've reached here, it's a bad type passed to keys
|
2012-12-07 12:46:02 +00:00
|
|
|
raise RuntimeError('The passed keys need to be a list')
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
return keys
|
2012-12-07 12:46:02 +00:00
|
|
|
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
def __getWithinSaltReturn(self, ret, keys):
|
2012-12-12 17:11:44 +00:00
|
|
|
self.assertReturnNonEmptySaltType(ret)
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
keys = self.__return_valid_keys(keys)
|
|
|
|
okeys = keys[:]
|
2014-11-21 19:41:22 +00:00
|
|
|
for part in six.itervalues(ret):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
try:
|
|
|
|
ret_item = part[okeys.pop(0)]
|
|
|
|
except (KeyError, TypeError):
|
|
|
|
raise AssertionError(
|
|
|
|
'Could not get ret{0} from salt\'s return: {1}'.format(
|
2015-08-27 04:26:07 +00:00
|
|
|
''.join(['[\'{0}\']'.format(k) for k in keys]), part
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
)
|
|
|
|
)
|
2012-12-07 12:46:02 +00:00
|
|
|
while okeys:
|
|
|
|
try:
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
ret_item = ret_item[okeys.pop(0)]
|
2012-12-07 12:46:02 +00:00
|
|
|
except (KeyError, TypeError):
|
|
|
|
raise AssertionError(
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
'Could not get ret{0} from salt\'s return: {1}'.format(
|
2015-08-27 04:26:07 +00:00
|
|
|
''.join(['[\'{0}\']'.format(k) for k in keys]), part
|
2012-12-07 12:46:02 +00:00
|
|
|
)
|
|
|
|
)
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
return ret_item
|
|
|
|
|

    def assertSaltTrueReturn(self, ret):
        try:
            self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not True. Salt Comment:\n{comment}'.format(
                        **(next(six.itervalues(ret)))
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned:\n{0}'.format(
                        pprint.pformat(ret)
                    )
                )
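
    # Hypothetical usage sketch (illustrative only, not part of the original
    # mixin): a state integration test with the suite's state-running helpers
    # available would typically assert on the return right after running a
    # state.  The state name and arguments below are assumptions:
    #
    #     ret = self.run_state('file.managed', name='/tmp/example',
    #                          contents='example')
    #     self.assertSaltTrueReturn(ret)
    #
    # assertSaltFalseReturn and assertSaltNoneReturn below follow the same
    # pattern, for states expected to fail and for test=True dry runs (which
    # report a None result) respectively.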

    def assertSaltFalseReturn(self, ret):
        try:
            self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not False. Salt Comment:\n{comment}'.format(
                        **(next(six.itervalues(ret)))
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned: {0}'.format(ret)
                )

    def assertSaltNoneReturn(self, ret):
        try:
            self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not None. Salt Comment:\n{comment}'.format(
                        **(next(six.itervalues(ret)))
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned: {0}'.format(ret)
                )

    def assertInSaltComment(self, in_comment, ret):
        return self.assertIn(
            in_comment, self.__getWithinSaltReturn(ret, 'comment')
        )

    def assertNotInSaltComment(self, not_in_comment, ret):
        return self.assertNotIn(
            not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
        )

    def assertSaltCommentRegexpMatches(self, ret, pattern):
        return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
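
    # Hypothetical usage sketch (illustrative only): the comment helpers above
    # match against the state's 'comment' field; the expected text here is an
    # assumption rather than output promised by any particular state:
    #
    #     self.assertInSaltComment('File /tmp/example updated', ret)
    #     self.assertNotInSaltComment('Traceback', ret)
    #     self.assertSaltCommentRegexpMatches(ret, r'updated$')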

    def assertInSaltStateWarning(self, in_comment, ret):
        return self.assertIn(
            in_comment, self.__getWithinSaltReturn(ret, 'warnings')
        )

    def assertNotInSaltStateWarning(self, not_in_comment, ret):
        return self.assertNotIn(
            not_in_comment, self.__getWithinSaltReturn(ret, 'warnings')
        )
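
    # Hypothetical usage sketch (illustrative only): a state return may carry
    # a 'warnings' list (for example, for deprecated or unsupported
    # arguments), which the two helpers above inspect; the warning text is an
    # assumption:
    #
    #     self.assertInSaltStateWarning('is deprecated', ret)
    #     self.assertNotInSaltStateWarning('unsupported', ret)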

    def assertInSaltReturn(self, item_to_check, ret, keys):
        return self.assertIn(
            item_to_check, self.__getWithinSaltReturn(ret, keys)
        )

    def assertNotInSaltReturn(self, item_to_check, ret, keys):
        return self.assertNotIn(
            item_to_check, self.__getWithinSaltReturn(ret, keys)
        )

    def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
        return self.assertRegexpMatches(
            self.__getWithinSaltReturn(ret, keys), pattern
        )
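
    # Hypothetical usage sketch (illustrative only): the generic helpers above
    # accept either a single key or a sequence of keys into the state return;
    # the key path and values shown are assumptions:
    #
    #     self.assertInSaltReturn('new value', ret, ('changes', 'new'))
    #     self.assertNotInSaltReturn('old value', ret, ('changes', 'new'))
    #     self.assertInSaltReturnRegexpMatches(ret, r'^new', ('changes', 'new'))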

    def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
        keys = ['changes'] + self.__return_valid_keys(keys)
        return self.assertEqual(
            self.__getWithinSaltReturn(ret, keys), comparison
        )

    def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
        keys = ['changes'] + self.__return_valid_keys(keys)
        return self.assertNotEqual(
            self.__getWithinSaltReturn(ret, keys), comparison
        )
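
    # Hypothetical usage sketch (illustrative only): 'changes' is prepended to
    # the passed keys, so the first call below compares the equivalent of
    # ret['<state id>']['changes']['diff']; the expected values are
    # assumptions:
    #
    #     self.assertSaltStateChangesEqual(ret, 'New file', keys=('diff',))
    #     self.assertSaltStateChangesNotEqual(ret, {})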