2013-11-27 11:19:24 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2012-03-28 06:10:13 +00:00
|
|
|
'''
|
|
|
|
Set up the Salt integration test suite
|
|
|
|
'''
|
|
|
|
|
|
|
|
# Import Python libs
|
2014-11-21 19:05:13 +00:00
|
|
|
from __future__ import absolute_import, print_function
|
2012-02-20 12:18:13 +00:00
|
|
|
import os
|
2014-02-04 21:07:13 +00:00
|
|
|
import re
|
2012-04-21 23:27:59 +00:00
|
|
|
import sys
|
2014-06-12 23:59:55 +00:00
|
|
|
import copy
|
|
|
|
import json
|
2013-06-29 19:57:23 +00:00
|
|
|
import time
|
2016-05-08 18:39:57 +00:00
|
|
|
import stat
|
2016-05-10 13:05:32 +00:00
|
|
|
import errno
|
2014-10-30 06:45:52 +00:00
|
|
|
import signal
|
2012-03-28 06:10:13 +00:00
|
|
|
import shutil
|
2013-04-25 19:59:34 +00:00
|
|
|
import pprint
|
2014-11-26 00:41:02 +00:00
|
|
|
import atexit
|
2016-05-06 18:49:08 +00:00
|
|
|
import socket
|
2013-04-25 19:59:34 +00:00
|
|
|
import logging
|
2013-06-29 19:57:23 +00:00
|
|
|
import tempfile
|
2016-05-06 18:49:08 +00:00
|
|
|
import threading
|
2013-01-14 12:35:42 +00:00
|
|
|
import subprocess
|
2013-06-29 19:57:23 +00:00
|
|
|
import multiprocessing
|
2012-11-17 17:37:53 +00:00
|
|
|
from datetime import datetime, timedelta
|
2012-06-23 09:47:53 +00:00
|
|
|
try:
|
|
|
|
import pwd
|
|
|
|
except ImportError:
|
|
|
|
pass
|
2012-02-20 12:18:13 +00:00
|
|
|
|
2014-02-04 21:07:13 +00:00
|
|
|
# Matches the error Salt returns when a state function is already running,
# capturing the function name, PID, start date and job id so tests can detect
# and react to a conflicting in-progress state run.
STATE_FUNCTION_RUNNING_RE = re.compile(
    r'''The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID '''
    r'(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)'
)
|
2017-02-26 14:20:41 +00:00
|
|
|
|
|
|
|
TESTS_DIR = os.path.dirname(os.path.dirname(os.path.normpath(os.path.abspath(__file__))))
|
2017-01-30 22:25:31 +00:00
|
|
|
if os.name == 'nt':
|
2017-02-26 14:20:41 +00:00
|
|
|
TESTS_DIR = TESTS_DIR.replace('\\', '\\\\')
|
|
|
|
CODE_DIR = os.path.dirname(TESTS_DIR)
|
|
|
|
|
|
|
|
# Let's inject CODE_DIR so salt is importable if not there already
|
|
|
|
if CODE_DIR not in sys.path:
|
|
|
|
sys.path.insert(0, CODE_DIR)
|
|
|
|
|
|
|
|
# Import salt tests support dirs
|
|
|
|
from tests.support.paths import * # pylint: disable=wildcard-import
|
|
|
|
from tests.support.processes import * # pylint: disable=wildcard-import
|
2017-02-27 13:58:07 +00:00
|
|
|
from tests.support.unit import TestCase
|
|
|
|
from tests.support.case import ShellTestCase
|
|
|
|
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
|
|
|
|
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
|
2017-02-27 15:59:04 +00:00
|
|
|
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
|
2013-06-24 23:50:02 +00:00
|
|
|
|
2012-03-28 06:10:13 +00:00
|
|
|
# Import Salt libs
|
2012-02-20 12:18:13 +00:00
|
|
|
import salt
|
|
|
|
import salt.config
|
|
|
|
import salt.minion
|
2012-05-28 03:00:10 +00:00
|
|
|
import salt.runner
|
2013-02-08 00:35:50 +00:00
|
|
|
import salt.output
|
2013-08-16 01:01:26 +00:00
|
|
|
import salt.version
|
2014-01-11 20:51:32 +00:00
|
|
|
import salt.utils
|
Add warnings to test suite when requisites are not installed
Since we have recently changed the test suite to use new-style
git_pillar, GitPython or Pygit2 is a hard dep for the test suite.
Additionally, when starting up the daemons, if no IPv4 addresses can be
detected (which can happen on docker containers which tend to have
minimal installs) then the suite will time out trying to detect whether
or not the minion/sub-minion has connected, which while it does not
prove fatal for the test suite, it does make the suite take several
minutes to start up and begin running tests. This is because the test
suite invokes the manage.joined runner, which in turn invokes
salt.utils.network.ip_addrs() to get the system's IP addresses to match
against those which are connected. If it can't get the IP addresses,
then the manage.joined runner returns an empty list, and the test suite
believes that no minions have connected, and the function that
periodically runs manage.joined will eventually time out.
This commit adds messages to the console when no suitable gitfs provider
is installed, and when salt.utils.network.ip_addrs() returns an empty
list, to hopefully prompt the user to install the missing requisites.
2017-02-25 22:06:52 +00:00
|
|
|
import salt.utils.network
|
2014-07-08 18:18:24 +00:00
|
|
|
import salt.utils.process
|
2015-08-03 12:20:08 +00:00
|
|
|
import salt.log.setup as salt_log_setup
|
2016-08-31 03:57:06 +00:00
|
|
|
from salt.ext import six
|
2012-03-09 07:47:34 +00:00
|
|
|
from salt.utils.verify import verify_env
|
2014-10-06 18:59:42 +00:00
|
|
|
from salt.utils.immutabletypes import freeze
|
2016-05-10 00:47:09 +00:00
|
|
|
from salt.utils.nb_popen import NonBlockingPopen
|
2015-04-04 22:20:52 +00:00
|
|
|
from salt.exceptions import SaltClientError
|
2012-11-06 11:20:06 +00:00
|
|
|
|
Add warnings to test suite when requisites are not installed
Since we have recently changed the test suite to use new-style
git_pillar, GitPython or Pygit2 is a hard dep for the test suite.
Additionally, when starting up the daemons, if no IPv4 addresses can be
detected (which can happen on docker containers which tend to have
minimal installs) then the suite will time out trying to detect whether
or not the minion/sub-minion has connected, which while it does not
prove fatal for the test suite, it does make the suite take several
minutes to start up and begin running tests. This is because the test
suite invokes the manage.joined runner, which in turn invokes
salt.utils.network.ip_addrs() to get the system's IP addresses to match
against those which are connected. If it can't get the IP addresses,
then the manage.joined runner returns an empty list, and the test suite
believes that no minions have connected, and the function that
periodically runs manage.joined will eventually time out.
This commit adds messages to the console when no suitable gitfs provider
is installed, and when salt.utils.network.ip_addrs() returns an empty
list, to hopefully prompt the user to install the missing requisites.
2017-02-25 22:06:52 +00:00
|
|
|
try:
|
|
|
|
from salt.utils.gitfs import HAS_GITPYTHON, HAS_PYGIT2
|
|
|
|
HAS_GITFS = HAS_GITPYTHON or HAS_PYGIT2
|
|
|
|
except ImportError:
|
|
|
|
HAS_GITFS = False
|
|
|
|
|
2016-08-31 03:57:06 +00:00
|
|
|
try:
|
|
|
|
from shlex import quote as _quote # pylint: disable=E0611
|
|
|
|
except ImportError:
|
|
|
|
from pipes import quote as _quote
|
|
|
|
|
2014-06-27 19:13:41 +00:00
|
|
|
try:
|
|
|
|
import salt.master
|
|
|
|
except ImportError:
|
2014-11-22 10:12:06 +00:00
|
|
|
# Not required for raet tests
|
2014-06-27 19:13:41 +00:00
|
|
|
pass
|
|
|
|
|
2013-08-30 17:24:15 +00:00
|
|
|
# Import 3rd-party libs
|
|
|
|
import yaml
|
2016-05-06 18:49:08 +00:00
|
|
|
import msgpack
|
2014-11-22 10:12:06 +00:00
|
|
|
import salt.ext.six as six
|
2016-09-01 15:20:18 +00:00
|
|
|
from salt.ext.six.moves import cStringIO
|
2016-07-19 17:04:24 +00:00
|
|
|
|
|
|
|
try:
|
2016-07-20 21:34:58 +00:00
|
|
|
import salt.ext.six.moves.socketserver as socketserver
|
2016-07-19 17:04:24 +00:00
|
|
|
except ImportError:
|
|
|
|
import socketserver
|
2016-06-29 20:30:18 +00:00
|
|
|
|
2015-11-02 23:15:02 +00:00
|
|
|
if salt.utils.is_windows():
|
|
|
|
import win32api
|
|
|
|
|
2016-05-06 18:49:08 +00:00
|
|
|
from tornado import gen
|
|
|
|
from tornado import ioloop
|
|
|
|
|
2017-02-26 14:20:41 +00:00
|
|
|
# Import salt tests support libs
|
|
|
|
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
|
2016-11-24 13:19:56 +00:00
|
|
|
|
2014-10-06 18:59:42 +00:00
|
|
|
RUNTIME_CONFIGS = {}
|
|
|
|
|
2013-04-25 19:59:34 +00:00
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
2014-11-26 00:41:02 +00:00
|
|
|
def cleanup_runtime_config_instance(to_cleanup):
    '''
    Explicitly and forcibly drop every cached instance held in *to_cleanup*.
    '''
    # Snapshot the keys first: the mapping is mutated while being drained
    for cached_key in list(to_cleanup):
        dropped = to_cleanup.pop(cached_key)
        del dropped
|
|
|
|
|
|
|
|
|
2014-11-26 00:41:02 +00:00
|
|
|
atexit.register(cleanup_runtime_config_instance, RUNTIME_CONFIGS)
|
|
|
|
|
2016-05-10 12:19:43 +00:00
|
|
|
_RUNTESTS_PORTS = {}
|
2014-11-26 00:41:02 +00:00
|
|
|
|
2017-04-01 13:21:19 +00:00
|
|
|
if salt.utils.is_windows():
|
|
|
|
RUNNING_TESTS_USER = win32api.GetUserName()
|
|
|
|
else:
|
|
|
|
RUNNING_TESTS_USER = pwd.getpwuid(os.getuid()).pw_name
|
|
|
|
|
2016-05-09 00:27:09 +00:00
|
|
|
|
2016-05-06 18:49:08 +00:00
|
|
|
def get_unused_localhost_port():
    '''
    Return a random unused port on localhost
    '''
    probe = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Binding to port 0 lets the kernel pick a free ephemeral port
    probe.bind(('127.0.0.1', 0))
    port = probe.getsockname()[1]

    if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
        # These ports are hardcoded in the test configuration
        port = get_unused_localhost_port()
        probe.close()
        return port

    darwin = sys.platform.startswith('darwin')
    bsd = 'bsd' in sys.platform

    if darwin and port in _RUNTESTS_PORTS:
        # Already handed out during this run — pick another one
        port = get_unused_localhost_port()
        probe.close()
        return port

    # Keep the bound socket around so the port remains reserved until the
    # atexit cleanup closes it
    _RUNTESTS_PORTS[port] = probe

    # NOTE(review): on Darwin/BSD the probe socket is closed immediately —
    # presumably keeping it open interferes with the daemons binding the
    # port on those platforms; confirm before relying on the reservation
    if darwin or bsd:
        probe.close()

    return port
|
2016-05-10 12:19:43 +00:00
|
|
|
def close_open_sockets(sockets_dict):
    '''
    Close and discard every reserved socket tracked in *sockets_dict*.
    '''
    # Iterate over a snapshot of the keys since entries are popped in-loop
    for reserved_port in list(sockets_dict):
        sockets_dict.pop(reserved_port).close()
|
|
|
|
|
|
|
|
|
|
|
|
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
|
|
|
|
|
|
|
|
|
2016-05-06 18:49:08 +00:00
|
|
|
SALT_LOG_PORT = get_unused_localhost_port()
|
|
|
|
|
|
|
|
|
|
|
|
class ThreadingMixIn(socketserver.ThreadingMixIn):
    # Use daemon threads so in-flight log connections never block process exit
    daemon_threads = True
|
|
|
|
|
|
|
|
|
|
|
|
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
    '''
    TCP server that receives log records from the test daemons, serving each
    connection in its own daemon thread.
    '''

    # Allow rebinding the log port quickly between test runs
    allow_reuse_address = True

    def server_activate(self):
        # Event polled by request handlers so they exit once shutdown starts
        self.shutting_down = threading.Event()
        socketserver.TCPServer.server_activate(self)
        #super(ThreadedSocketServer, self).server_activate()

    def server_close(self):
        # shutting_down only exists after server_activate() has run
        if hasattr(self, 'shutting_down'):
            self.shutting_down.set()
        socketserver.TCPServer.server_close(self)
        #super(ThreadedSocketServer, self).server_close()
|
|
|
|
|
|
|
|
|
|
|
|
class SocketServerRequestHandler(socketserver.StreamRequestHandler):

    def handle(self):
        '''
        Receive msgpack-serialized log records from a connected test daemon
        and replay each one through this process' logging hierarchy.
        '''
        unpacker = msgpack.Unpacker(encoding='utf-8')
        while not self.server.shutting_down.is_set():
            try:
                wire_bytes = self.request.recv(1024)
                if not wire_bytes:
                    # Peer closed the connection
                    break
                # Feed partial reads into the streaming unpacker; it yields a
                # dict only once a complete msgpack frame has accumulated
                unpacker.feed(wire_bytes)
                for record_dict in unpacker:
                    record = logging.makeLogRecord(record_dict)
                    logger = logging.getLogger(record.name)
                    logger.handle(record)
            except (EOFError, KeyboardInterrupt, SystemExit):
                break
            except socket.error as exc:
                try:
                    # errno.WSAECONNRESET only exists on Windows; the attribute
                    # access itself raises AttributeError elsewhere
                    if exc.errno == errno.WSAECONNRESET:
                        # Connection reset on windows
                        break
                except AttributeError:
                    # We're not on windows
                    pass
                log.exception(exc)
            except Exception as exc:
                log.exception(exc)
|
|
|
|
|
|
|
|
|
2012-02-20 12:18:13 +00:00
|
|
|
class TestDaemon(object):
    '''
    Set up the master and minion daemons, and run related cases
    '''
    # Seconds allowed for the test minions to connect and to sync modules
    MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
|
2012-07-20 06:16:14 +00:00
|
|
|
|
2013-06-24 18:37:07 +00:00
|
|
|
def __init__(self, parser):
|
|
|
|
self.parser = parser
|
2016-07-12 19:13:57 +00:00
|
|
|
self.colors = salt.utils.get_colors(self.parser.options.no_colors is False)
|
2016-05-20 14:39:40 +00:00
|
|
|
if salt.utils.is_windows():
|
|
|
|
# There's no shell color support on windows...
|
|
|
|
for key in self.colors:
|
|
|
|
self.colors[key] = ''
|
2012-07-20 06:16:14 +00:00
|
|
|
|
2012-02-20 12:18:13 +00:00
|
|
|
    def __enter__(self):
        '''
        Start a master and minion
        '''
        # Setup the multiprocessing logging queue listener
        salt_log_setup.setup_multiprocessing_logging_listener(
            self.master_opts
        )

        # Set up PATH to mockbin
        self._enter_mockbin()

        if not HAS_GITFS:
            # Warn early: new-style git_pillar needs GitPython or Pygit2
            sys.stdout.write(
                ' * {LIGHT_RED}No suitable provider for git_pillar is installed. Install\n'
                '   GitPython or Pygit2.{ENDC}\n'.format(
                    **self.colors
                )
            )

        # Spin up the daemons for the transport selected on the CLI
        if self.parser.options.transport == 'zeromq':
            self.start_zeromq_daemons()
        elif self.parser.options.transport == 'raet':
            self.start_raet_daemons()
        elif self.parser.options.transport == 'tcp':
            self.start_tcp_daemons()

        self.minion_targets = set(['minion', 'sub_minion'])
        self.pre_setup_minions()
        self.setup_minions()

        if getattr(self.parser.options, 'ssh', False):
            self.prep_ssh()

        if self.parser.options.sysinfo:
            # print_header only grew the ``width`` keyword later; fall back
            # for older support-code versions that raise TypeError
            try:
                print_header(
                    '~~~~~~~ Versions Report ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Versions Report ', inline=True)

            print('\n'.join(salt.version.versions_report()))

            try:
                print_header(
                    '~~~~~~~ Minion Grains Information ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Minion Grains Information ', inline=True)

            grains = self.client.cmd('minion', 'grains.items')

            minion_opts = self.minion_opts.copy()
            minion_opts['color'] = self.parser.options.no_colors is False
            salt.output.display_output(grains, 'grains', minion_opts)

        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('', sep='=', inline=True)

        # Always run post-setup, even if a hook above changes control flow
        try:
            return self
        finally:
            self.post_setup_minions()
|
2012-02-20 12:18:13 +00:00
|
|
|
|
2015-01-13 16:28:41 +00:00
|
|
|
def start_daemon(self, cls, opts, start_fun):
|
|
|
|
def start(cls, opts, start_fun):
|
2015-10-28 18:06:15 +00:00
|
|
|
salt.utils.appendproctitle('{0}-{1}'.format(self.__class__.__name__, cls.__name__))
|
2015-01-13 16:28:41 +00:00
|
|
|
daemon = cls(opts)
|
|
|
|
getattr(daemon, start_fun)()
|
|
|
|
process = multiprocessing.Process(target=start,
|
|
|
|
args=(cls, opts, start_fun))
|
|
|
|
process.start()
|
|
|
|
return process
|
|
|
|
|
2014-06-11 21:34:53 +00:00
|
|
|
def start_zeromq_daemons(self):
|
|
|
|
'''
|
|
|
|
Fire up the daemons used for zeromq tests
|
|
|
|
'''
|
Add warnings to test suite when requisites are not installed
Since we have recently changed the test suite to use new-style
git_pillar, GitPython or Pygit2 is a hard dep for the test suite.
Additionally, when starting up the daemons, if no IPv4 addresses can be
detected (which can happen on docker containers which tend to have
minimal installs) then the suite will time out trying to detect whether
or not the minion/sub-minion has connected, which while it does not
prove fatal for the test suite, it does make the suite take several
minutes to start up and begin running tests. This is because the test
suite invokes the manage.joined runner, which in turn invokes
salt.utils.network.ip_addrs() to get the system's IP addresses to match
against those which are connected. If it can't get the IP addresses,
then the manage.joined runner returns an empty list, and the test suite
believes that no minions have connected, and the function that
periodically runs manage.joined will eventually time out.
This commit adds messages to the console when no suitable gitfs provider
is installed, and when salt.utils.network.ip_addrs() returns an empty
list, to hopefully prompt the user to install the missing requisites.
2017-02-25 22:06:52 +00:00
|
|
|
if not salt.utils.network.ip_addrs():
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_RED}Unable to list IPv4 addresses. Test suite startup will be\n'
|
|
|
|
' slower. Install iproute/ifconfig to fix this.{ENDC}\n'.format(
|
|
|
|
**self.colors
|
|
|
|
)
|
|
|
|
)
|
2016-05-23 12:54:02 +00:00
|
|
|
self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
|
|
|
|
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
|
|
|
|
self.log_server_process.daemon = True
|
|
|
|
self.log_server_process.start()
|
2017-02-26 14:20:41 +00:00
|
|
|
try:
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_YELLOW}Starting salt-master ... {ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
self.master_process = start_daemon(
|
|
|
|
daemon_name='salt-master',
|
|
|
|
daemon_id=self.master_opts['id'],
|
|
|
|
daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']),
|
|
|
|
daemon_cli_script_name='master',
|
|
|
|
daemon_config=self.master_opts,
|
|
|
|
daemon_config_dir=TMP_CONF_DIR,
|
|
|
|
daemon_class=SaltMaster,
|
|
|
|
bin_dir_path=SCRIPT_DIR,
|
|
|
|
fail_hard=True,
|
|
|
|
start_timeout=30)
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
except (RuntimeWarning, RuntimeError):
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
2016-05-06 18:49:08 +00:00
|
|
|
|
2017-02-26 14:20:41 +00:00
|
|
|
try:
|
2016-05-06 23:30:07 +00:00
|
|
|
sys.stdout.write(
|
2017-02-26 14:20:41 +00:00
|
|
|
' * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
self.minion_process = start_daemon(
|
|
|
|
daemon_name='salt-minion',
|
|
|
|
daemon_id=self.master_opts['id'],
|
|
|
|
daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']),
|
|
|
|
daemon_cli_script_name='minion',
|
|
|
|
daemon_config=self.minion_opts,
|
|
|
|
daemon_config_dir=TMP_CONF_DIR,
|
|
|
|
daemon_class=SaltMinion,
|
|
|
|
bin_dir_path=SCRIPT_DIR,
|
|
|
|
fail_hard=True,
|
|
|
|
start_timeout=30)
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
2016-05-06 23:30:07 +00:00
|
|
|
)
|
|
|
|
)
|
2017-02-26 14:20:41 +00:00
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
2016-05-06 23:30:07 +00:00
|
|
|
sys.stdout.flush()
|
2017-02-26 14:20:41 +00:00
|
|
|
except (RuntimeWarning, RuntimeError):
|
2016-05-06 23:30:07 +00:00
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
2017-02-26 14:20:41 +00:00
|
|
|
' * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
|
|
|
|
try:
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
self.sub_minion_process = start_daemon(
|
|
|
|
daemon_name='sub salt-minion',
|
|
|
|
daemon_id=self.master_opts['id'],
|
|
|
|
daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']),
|
|
|
|
daemon_cli_script_name='minion',
|
|
|
|
daemon_config=self.sub_minion_opts,
|
|
|
|
daemon_config_dir=TMP_SUB_MINION_CONF_DIR,
|
|
|
|
daemon_class=SaltMinion,
|
|
|
|
bin_dir_path=SCRIPT_DIR,
|
|
|
|
fail_hard=True,
|
|
|
|
start_timeout=30)
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
except (RuntimeWarning, RuntimeError):
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
2016-05-06 23:30:07 +00:00
|
|
|
)
|
|
|
|
)
|
2017-02-26 14:20:41 +00:00
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
|
|
|
|
try:
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
self.smaster_process = start_daemon(
|
|
|
|
daemon_name='salt-smaster',
|
|
|
|
daemon_id=self.syndic_master_opts['id'],
|
|
|
|
daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']),
|
|
|
|
daemon_cli_script_name='master',
|
|
|
|
daemon_config=self.syndic_master_opts,
|
|
|
|
daemon_config_dir=TMP_SYNDIC_MASTER_CONF_DIR,
|
|
|
|
daemon_class=SaltMaster,
|
|
|
|
bin_dir_path=SCRIPT_DIR,
|
|
|
|
fail_hard=True,
|
|
|
|
start_timeout=30)
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
except (RuntimeWarning, RuntimeError):
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
|
|
|
|
try:
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
self.syndic_process = start_daemon(
|
|
|
|
daemon_name='salt-syndic',
|
|
|
|
daemon_id=self.syndic_opts['id'],
|
|
|
|
daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']),
|
|
|
|
daemon_cli_script_name='syndic',
|
|
|
|
daemon_config=self.syndic_opts,
|
|
|
|
daemon_config_dir=TMP_SYNDIC_MINION_CONF_DIR,
|
|
|
|
daemon_class=SaltSyndic,
|
|
|
|
bin_dir_path=SCRIPT_DIR,
|
|
|
|
fail_hard=True,
|
|
|
|
start_timeout=30)
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
|
|
|
sys.stdout.flush()
|
|
|
|
except (RuntimeWarning, RuntimeError):
|
|
|
|
sys.stdout.write(
|
|
|
|
'\r{0}\r'.format(
|
|
|
|
' ' * getattr(self.parser.options, 'output_columns', PNUM)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
sys.stdout.write(
|
|
|
|
' * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}'.format(**self.colors)
|
|
|
|
)
|
2016-05-06 23:30:07 +00:00
|
|
|
sys.stdout.flush()
|
2014-06-11 21:34:53 +00:00
|
|
|
|
|
|
|
    def start_raet_daemons(self):
        '''
        Fire up the raet daemons!
        '''
        # Imported lazily so the zeromq/tcp paths don't require raet deps
        import salt.daemons.flo
        self.master_process = self.start_daemon(salt.daemons.flo.IofloMaster,
                                                self.master_opts,
                                                'start')

        self.minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                                self.minion_opts,
                                                'tune_in')

        self.sub_minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                                    self.sub_minion_opts,
                                                    'tune_in')
        # Wait for the daemons to all spin up
        time.sleep(5)

        # self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
        #                                          self.syndic_master_opts,
        #                                          'start')

        # no raet syndic daemon yet

    # The TCP transport reuses the zeromq startup path
    start_tcp_daemons = start_zeromq_daemons
|
|
|
|
|
2014-05-09 22:00:36 +00:00
|
|
|
def prep_ssh(self):
|
2014-05-09 21:54:21 +00:00
|
|
|
'''
|
|
|
|
Generate keys and start an ssh daemon on an alternate port
|
|
|
|
'''
|
2016-08-31 12:34:38 +00:00
|
|
|
sys.stdout.write(
|
2016-08-31 12:41:05 +00:00
|
|
|
' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
|
2016-08-31 12:34:38 +00:00
|
|
|
'SSH server',
|
|
|
|
**self.colors
|
|
|
|
)
|
|
|
|
)
|
2014-05-09 21:54:21 +00:00
|
|
|
keygen = salt.utils.which('ssh-keygen')
|
|
|
|
sshd = salt.utils.which('sshd')
|
|
|
|
|
|
|
|
if not (keygen and sshd):
|
|
|
|
print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
|
|
|
|
return
|
|
|
|
if not os.path.exists(TMP_CONF_DIR):
|
|
|
|
os.makedirs(TMP_CONF_DIR)
|
2014-05-09 22:00:36 +00:00
|
|
|
|
2014-05-23 20:10:08 +00:00
|
|
|
# Generate client key
|
2014-05-12 22:38:16 +00:00
|
|
|
pub_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
|
|
|
|
priv_key_test_file = os.path.join(TMP_CONF_DIR, 'key_test')
|
|
|
|
if os.path.exists(pub_key_test_file):
|
|
|
|
os.remove(pub_key_test_file)
|
|
|
|
if os.path.exists(priv_key_test_file):
|
|
|
|
os.remove(priv_key_test_file)
|
2014-05-09 21:54:21 +00:00
|
|
|
keygen_process = subprocess.Popen(
|
2014-05-12 22:38:16 +00:00
|
|
|
[keygen, '-t',
|
|
|
|
'ecdsa',
|
|
|
|
'-b',
|
|
|
|
'521',
|
|
|
|
'-C',
|
|
|
|
'"$(whoami)@$(hostname)-$(date -I)"',
|
|
|
|
'-f',
|
2014-05-23 20:10:08 +00:00
|
|
|
'key_test',
|
|
|
|
'-P',
|
2014-05-12 22:38:16 +00:00
|
|
|
''],
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
close_fds=True,
|
|
|
|
cwd=TMP_CONF_DIR
|
2014-05-09 21:54:21 +00:00
|
|
|
)
|
2014-05-12 22:38:16 +00:00
|
|
|
_, keygen_err = keygen_process.communicate()
|
|
|
|
if keygen_err:
|
2015-06-06 13:47:52 +00:00
|
|
|
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_err)))
|
2014-05-12 22:38:16 +00:00
|
|
|
sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
|
2014-05-12 18:06:09 +00:00
|
|
|
shutil.copy(sshd_config_path, TMP_CONF_DIR)
|
2014-05-09 21:54:21 +00:00
|
|
|
auth_key_file = os.path.join(TMP_CONF_DIR, 'key_test.pub')
|
2014-05-23 20:10:08 +00:00
|
|
|
|
|
|
|
# Generate server key
|
|
|
|
server_key_dir = os.path.join(TMP_CONF_DIR, 'server')
|
|
|
|
if not os.path.exists(server_key_dir):
|
|
|
|
os.makedirs(server_key_dir)
|
|
|
|
server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
|
|
|
|
server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
|
|
|
|
server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
|
|
|
|
server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
|
|
|
|
server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
|
|
|
|
server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host.ed25519_key.pub')
|
|
|
|
|
|
|
|
for server_key_file in (server_dsa_priv_key_file,
|
|
|
|
server_dsa_pub_key_file,
|
|
|
|
server_ecdsa_priv_key_file,
|
|
|
|
server_ecdsa_pub_key_file,
|
|
|
|
server_ed25519_priv_key_file,
|
|
|
|
server_ed25519_pub_key_file):
|
|
|
|
if os.path.exists(server_key_file):
|
|
|
|
os.remove(server_key_file)
|
|
|
|
|
|
|
|
keygen_process_dsa = subprocess.Popen(
|
|
|
|
[keygen, '-t',
|
|
|
|
'dsa',
|
|
|
|
'-b',
|
|
|
|
'1024',
|
|
|
|
'-C',
|
|
|
|
'"$(whoami)@$(hostname)-$(date -I)"',
|
|
|
|
'-f',
|
|
|
|
'ssh_host_dsa_key',
|
|
|
|
'-P',
|
|
|
|
''],
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
close_fds=True,
|
|
|
|
cwd=server_key_dir
|
|
|
|
)
|
|
|
|
_, keygen_dsa_err = keygen_process_dsa.communicate()
|
|
|
|
if keygen_dsa_err:
|
2015-06-06 13:47:52 +00:00
|
|
|
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_dsa_err)))
|
2014-05-23 20:10:08 +00:00
|
|
|
|
|
|
|
keygen_process_ecdsa = subprocess.Popen(
|
|
|
|
[keygen, '-t',
|
|
|
|
'ecdsa',
|
|
|
|
'-b',
|
|
|
|
'521',
|
|
|
|
'-C',
|
|
|
|
'"$(whoami)@$(hostname)-$(date -I)"',
|
|
|
|
'-f',
|
|
|
|
'ssh_host_ecdsa_key',
|
|
|
|
'-P',
|
|
|
|
''],
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
close_fds=True,
|
|
|
|
cwd=server_key_dir
|
|
|
|
)
|
|
|
|
_, keygen_escda_err = keygen_process_ecdsa.communicate()
|
|
|
|
if keygen_escda_err:
|
2015-06-06 13:47:52 +00:00
|
|
|
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_escda_err)))
|
2014-05-23 20:10:08 +00:00
|
|
|
|
|
|
|
keygen_process_ed25519 = subprocess.Popen(
|
|
|
|
[keygen, '-t',
|
|
|
|
'ed25519',
|
|
|
|
'-b',
|
|
|
|
'521',
|
|
|
|
'-C',
|
|
|
|
'"$(whoami)@$(hostname)-$(date -I)"',
|
|
|
|
'-f',
|
|
|
|
'ssh_host_ed25519_key',
|
|
|
|
'-P',
|
|
|
|
''],
|
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
close_fds=True,
|
|
|
|
cwd=server_key_dir
|
|
|
|
)
|
|
|
|
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
|
|
|
|
if keygen_ed25519_err:
|
2015-06-06 13:47:52 +00:00
|
|
|
print('ssh-keygen had errors: {0}'.format(salt.utils.to_str(keygen_ed25519_err)))
|
2014-05-23 20:10:08 +00:00
|
|
|
|
2014-11-26 17:39:18 +00:00
|
|
|
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
|
2014-05-09 21:54:21 +00:00
|
|
|
ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
|
2015-02-04 23:16:41 +00:00
|
|
|
if not keygen_dsa_err:
|
|
|
|
ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
|
|
|
|
if not keygen_escda_err:
|
|
|
|
ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
|
|
|
|
if not keygen_ed25519_err:
|
|
|
|
ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))
|
2014-10-30 06:45:52 +00:00
|
|
|
|
|
|
|
self.sshd_pidfile = os.path.join(TMP_CONF_DIR, 'sshd.pid')
|
2014-05-12 22:38:16 +00:00
|
|
|
self.sshd_process = subprocess.Popen(
|
2014-10-30 06:45:52 +00:00
|
|
|
[sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
|
2014-05-12 22:38:16 +00:00
|
|
|
stdout=subprocess.PIPE,
|
|
|
|
stderr=subprocess.PIPE,
|
|
|
|
close_fds=True,
|
|
|
|
cwd=TMP_CONF_DIR
|
2014-05-09 21:54:21 +00:00
|
|
|
)
|
2014-05-12 22:38:16 +00:00
|
|
|
_, sshd_err = self.sshd_process.communicate()
|
|
|
|
if sshd_err:
|
2015-06-06 13:47:52 +00:00
|
|
|
print('sshd had errors on startup: {0}'.format(salt.utils.to_str(sshd_err)))
|
2014-12-13 00:11:41 +00:00
|
|
|
else:
|
|
|
|
os.environ['SSH_DAEMON_RUNNING'] = 'True'
|
2014-05-12 22:38:16 +00:00
|
|
|
roster_path = os.path.join(FILES, 'conf/_ssh/roster')
|
|
|
|
shutil.copy(roster_path, TMP_CONF_DIR)
|
2017-04-01 13:21:19 +00:00
|
|
|
with salt.utils.fopen(os.path.join(TMP_CONF_DIR, 'roster'), 'a') as roster:
|
|
|
|
roster.write(' user: {0}\n'.format(RUNNING_TESTS_USER))
|
|
|
|
roster.write(' priv: {0}/{1}'.format(TMP_CONF_DIR, 'key_test'))
|
2016-08-31 12:41:05 +00:00
|
|
|
sys.stdout.write(
|
|
|
|
' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
|
|
|
|
**self.colors
|
|
|
|
)
|
|
|
|
)
|
2014-05-09 21:54:21 +00:00
|
|
|
|
2015-03-13 22:15:23 +00:00
|
|
|
@classmethod
def config(cls, role):
    '''
    Return the frozen runtime configuration dictionary for *role*.

    Valid roles are: ``master``, ``minion``, ``syndic``,
    ``syndic_master`` and ``sub_minion``.

    :raises KeyError: if *role* has not been transplanted yet.
    '''
    return RUNTIME_CONFIGS[role]
|
|
|
|
|
|
|
|
@classmethod
def config_location(cls):
    '''
    Return the directory where the runtime test configs are written.
    '''
    return TMP_CONF_DIR
|
|
|
|
|
2013-01-11 16:21:04 +00:00
|
|
|
@property
def client(self):
    '''
    Return a cached local client used, for example, to ping and sync
    the test minions.

    Creation is deferred past ``__enter__`` (and cached at module
    level) because a client created before the master is fully up
    would not receive the master events.
    '''
    try:
        return RUNTIME_CONFIGS['runtime_client']
    except KeyError:
        RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(
            mopts=self.master_opts
        )
        return RUNTIME_CONFIGS['runtime_client']
|
2013-01-11 16:21:04 +00:00
|
|
|
|
2014-06-12 23:59:55 +00:00
|
|
|
@classmethod
def transplant_configs(cls, transport='zeromq'):
    '''
    Rewrite the shipped test configuration files into TMP_CONF_DIR (and
    the per-daemon conf dirs), patching in runtime-computed values
    (temp paths, running user, free ports, transport), then reload the
    written files through salt.config and publish them in
    RUNTIME_CONFIGS and as class attributes.

    :param transport: 'zeromq' (default), 'raet' or 'tcp'.
    '''
    # Start from a clean slate every run.
    if os.path.isdir(TMP_CONF_DIR):
        shutil.rmtree(TMP_CONF_DIR)
    os.makedirs(TMP_CONF_DIR)
    os.makedirs(TMP_SUB_MINION_CONF_DIR)
    os.makedirs(TMP_SYNDIC_MASTER_CONF_DIR)
    os.makedirs(TMP_SYNDIC_MINION_CONF_DIR)
    print(' * Transplanting configuration files to \'{0}\''.format(TMP_CONF_DIR))
    # Empty known_hosts file dedicated to the salt-ssh tests.
    tests_known_hosts_file = os.path.join(TMP_CONF_DIR, 'salt_ssh_known_hosts')
    with salt.utils.fopen(tests_known_hosts_file, 'w') as known_hosts:
        known_hosts.write('')

    # This master connects to syndic_master via a syndic
    master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'master'))
    master_opts['known_hosts_file'] = tests_known_hosts_file
    master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    master_opts['user'] = RUNNING_TESTS_USER
    master_opts['config_dir'] = TMP_CONF_DIR
    master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
    master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')

    # This is the syndic for master
    # Let's start with a copy of the syndic master configuration
    syndic_opts = copy.deepcopy(master_opts)
    # Let's update with the syndic configuration
    syndic_opts.update(salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic')))
    syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    syndic_opts['config_dir'] = TMP_SYNDIC_MINION_CONF_DIR

    # This minion connects to master
    minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'minion'))
    minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    minion_opts['user'] = RUNNING_TESTS_USER
    minion_opts['config_dir'] = TMP_CONF_DIR
    minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
    minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
    minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
    minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')

    # This sub_minion also connects to master
    sub_minion_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'sub_minion'))
    sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
    sub_minion_opts['user'] = RUNNING_TESTS_USER
    sub_minion_opts['config_dir'] = TMP_SUB_MINION_CONF_DIR
    sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
    sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
    sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
    sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')

    # This is the master of masters
    syndic_master_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'syndic_master'))
    syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
    syndic_master_opts['user'] = RUNNING_TESTS_USER
    syndic_master_opts['config_dir'] = TMP_SYNDIC_MASTER_CONF_DIR
    syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
    syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')

    if transport == 'raet':
        master_opts['transport'] = 'raet'
        master_opts['raet_port'] = 64506
        minion_opts['transport'] = 'raet'
        minion_opts['raet_port'] = 64510
        sub_minion_opts['transport'] = 'raet'
        sub_minion_opts['raet_port'] = 64520
        # syndic_master_opts['transport'] = 'raet'

    if transport == 'tcp':
        master_opts['transport'] = 'tcp'
        minion_opts['transport'] = 'tcp'
        sub_minion_opts['transport'] = 'tcp'
        syndic_master_opts['transport'] = 'tcp'

    # Set up config options that require internal data
    master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
        'base': [os.path.join(FILES, 'pillar', 'base')]
    }
    master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            #   salt://my-temp-file.txt
            TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            TMP_PRODENV_STATE_TREE
        ]
    }
    # Auto-sync minions when they announce themselves.
    master_opts.setdefault('reactor', []).append(
        {
            'salt/minion/*/start': [
                os.path.join(FILES, 'reactor-sync-minion.sls')
            ],
        }
    )
    for opts_dict in (master_opts, syndic_master_opts):
        if 'ext_pillar' not in opts_dict:
            opts_dict['ext_pillar'] = []
        # 'type' is the Windows equivalent of 'cat'.
        if salt.utils.is_windows():
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
        else:
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})

    for opts_dict in (master_opts, syndic_master_opts):
        # We need to copy the extension modules into the new master root_dir
        # or it will be prefixed by it
        new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
        if not os.path.exists(new_extension_modules_path):
            shutil.copytree(
                os.path.join(
                    INTEGRATION_TEST_DIR, 'files', 'extension_modules'
                ),
                new_extension_modules_path
            )
        opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')

    # Point the config values to the correct temporary paths
    for name in ('hosts', 'aliases'):
        optname = '{0}.file'.format(name)
        optname_path = os.path.join(TMP, name)
        master_opts[optname] = optname_path
        minion_opts[optname] = optname_path
        sub_minion_opts[optname] = optname_path
        syndic_opts[optname] = optname_path
        syndic_master_opts[optname] = optname_path

    # Each daemon gets its own free port for connection checks.
    master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()

    # Wire the runtests engine and log handlers into every daemon.
    for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts):
        if 'engines' not in conf:
            conf['engines'] = []
        conf['engines'].append({'salt_runtests': {}})

        if 'engines_dirs' not in conf:
            conf['engines_dirs'] = []
        conf['engines_dirs'].insert(0, ENGINES_DIR)

        if 'log_handlers_dirs' not in conf:
            conf['log_handlers_dirs'] = []
        conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
        conf['runtests_log_port'] = SALT_LOG_PORT

    # ----- Transcribe Configuration ---------------------------------------------------------------------------->
    for entry in os.listdir(CONF_DIR):
        if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
            # These have runtime computed values and will be handled
            # differently
            continue
        entry_path = os.path.join(CONF_DIR, entry)
        if os.path.isfile(entry_path):
            shutil.copy(
                entry_path,
                os.path.join(TMP_CONF_DIR, entry)
            )
        elif os.path.isdir(entry_path):
            shutil.copytree(
                entry_path,
                os.path.join(TMP_CONF_DIR, entry)
            )

    for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master'):
        # locals() lookup relies on the *_opts variable names above.
        computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
        with salt.utils.fopen(os.path.join(TMP_CONF_DIR, entry), 'w') as fp_:
            fp_.write(yaml.dump(computed_config, default_flow_style=False))
    sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
    # Use context managers so the config files are flushed and closed
    # before the daemons re-read them (the old bare fopen().write()
    # leaked the handles and relied on GC to flush).
    with salt.utils.fopen(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as fp_:
        fp_.write(yaml.dump(sub_minion_computed_config, default_flow_style=False))
    shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SUB_MINION_CONF_DIR, 'master'))

    syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
    with salt.utils.fopen(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as fp_:
        fp_.write(yaml.dump(syndic_master_computed_config, default_flow_style=False))
    syndic_computed_config = copy.deepcopy(syndic_opts)
    with salt.utils.fopen(os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as fp_:
        fp_.write(yaml.dump(syndic_computed_config, default_flow_style=False))
    shutil.copyfile(os.path.join(TMP_CONF_DIR, 'master'), os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'))
    # <---- Transcribe Configuration -----------------------------------------------------------------------------

    # ----- Verify Environment ---------------------------------------------------------------------------------->
    # Re-read through salt.config so defaults/validation are applied.
    master_opts = salt.config.master_config(os.path.join(TMP_CONF_DIR, 'master'))
    minion_opts = salt.config.minion_config(os.path.join(TMP_CONF_DIR, 'minion'))
    syndic_opts = salt.config.syndic_config(
        os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'master'),
        os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
    )
    sub_minion_opts = salt.config.minion_config(os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion'))
    syndic_master_opts = salt.config.master_config(os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master'))

    RUNTIME_CONFIGS['master'] = freeze(master_opts)
    RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
    RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
    RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
    RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)

    verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
                os.path.join(master_opts['pki_dir'], 'minions_pre'),
                os.path.join(master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(master_opts['pki_dir'], 'minions_denied'),
                os.path.join(master_opts['cachedir'], 'jobs'),
                os.path.join(master_opts['cachedir'], 'raet'),
                os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(syndic_master_opts['cachedir'], 'jobs'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),
                os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(master_opts['pki_dir'], 'accepted'),
                os.path.join(master_opts['pki_dir'], 'rejected'),
                os.path.join(master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
                os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
                os.path.join(syndic_master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),

                os.path.join(minion_opts['pki_dir'], 'accepted'),
                os.path.join(minion_opts['pki_dir'], 'rejected'),
                os.path.join(minion_opts['pki_dir'], 'pending'),
                os.path.join(minion_opts['cachedir'], 'raet'),
                os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
                os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
                os.path.join(sub_minion_opts['pki_dir'], 'pending'),
                os.path.join(sub_minion_opts['cachedir'], 'raet'),
                os.path.dirname(master_opts['log_file']),
                minion_opts['extension_modules'],
                sub_minion_opts['extension_modules'],
                sub_minion_opts['pki_dir'],
                master_opts['sock_dir'],
                syndic_master_opts['sock_dir'],
                sub_minion_opts['sock_dir'],
                minion_opts['sock_dir'],
                TMP_STATE_TREE,
                TMP_PRODENV_STATE_TREE,
                TMP,
                ],
               RUNNING_TESTS_USER)

    cls.master_opts = master_opts
    cls.minion_opts = minion_opts
    cls.sub_minion_opts = sub_minion_opts
    cls.syndic_opts = syndic_opts
    cls.syndic_master_opts = syndic_master_opts
    # <---- Verify Environment -----------------------------------------------------------------------------------
|
|
|
|
|
2012-02-20 12:18:13 +00:00
|
|
|
def __exit__(self, type, value, traceback):  # pylint: disable=redefined-builtin
    '''
    Kill the minion and master processes and tear down the test
    support services (sshd, mockbin PATH entry, log server,
    multiprocessing logging).
    '''
    # These three daemons are always started.
    self.sub_minion_process.terminate()
    self.minion_process.terminate()
    self.master_process.terminate()
    # The syndic pair may not have been started; skip whichever is absent.
    for proc_attr in ('syndic_process', 'smaster_process'):
        try:
            getattr(self, proc_attr).terminate()
        except AttributeError:
            pass
    self.log_server.server_close()
    self.log_server.shutdown()
    self._exit_mockbin()
    self._exit_ssh()
    self.log_server_process.join()
    # Shutdown the multiprocessing logging queue listener
    salt_log_setup.shutdown_multiprocessing_logging()
    salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
|
2012-02-20 12:18:13 +00:00
|
|
|
|
2012-11-26 05:44:18 +00:00
|
|
|
def pre_setup_minions(self):
    '''
    Hook point: subclass this method for additional minion setup
    that must happen *before* the minions are set up.
    '''
|
2012-11-26 05:44:18 +00:00
|
|
|
|
|
|
|
def setup_minions(self):
    '''
    Minions setup routines (no-op hook in the base implementation).
    '''
|
2012-11-26 05:44:18 +00:00
|
|
|
|
|
|
|
def post_setup_minions(self):
    '''
    Hook point: subclass this method to execute code after the
    minions have been set up.
    '''
|
2012-11-26 05:44:18 +00:00
|
|
|
|
2012-07-26 16:14:00 +00:00
|
|
|
def _enter_mockbin(self):
    '''
    Prepend the MOCKBIN directory to $PATH so mocked binaries shadow
    the real ones during the test run.
    '''
    current = os.environ.get('PATH', '')
    entries = current.split(os.pathsep)
    if MOCKBIN not in entries:
        entries.insert(0, MOCKBIN)
    os.environ['PATH'] = os.pathsep.join(entries)
|
|
|
|
|
2014-05-12 22:38:16 +00:00
|
|
|
def _exit_ssh(self):
|
|
|
|
if hasattr(self, 'sshd_process'):
|
|
|
|
try:
|
|
|
|
self.sshd_process.kill()
|
2014-10-30 06:45:52 +00:00
|
|
|
except OSError as exc:
|
|
|
|
if exc.errno != 3:
|
|
|
|
raise
|
2014-11-26 23:38:34 +00:00
|
|
|
with salt.utils.fopen(self.sshd_pidfile) as fhr:
|
|
|
|
try:
|
|
|
|
os.kill(int(fhr.read()), signal.SIGKILL)
|
|
|
|
except OSError as exc:
|
|
|
|
if exc.errno != 3:
|
|
|
|
raise
|
2014-05-12 22:38:16 +00:00
|
|
|
|
2012-07-26 16:14:00 +00:00
|
|
|
def _exit_mockbin(self):
    '''
    Strip the MOCKBIN directory from $PATH, undoing _enter_mockbin.
    Only the first occurrence is removed; absence is not an error.
    '''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN in entries:
        entries.remove(MOCKBIN)
    os.environ['PATH'] = os.pathsep.join(entries)
|
|
|
|
|
2014-06-14 13:03:38 +00:00
|
|
|
@classmethod
def clean(cls):
    '''
    Remove every temporary directory created for the test run.
    '''
    def _force_remove(func, path, excinfo):
        # rmtree fails on read-only entries (notably on Windows);
        # give the owner full permissions and retry the failed call.
        os.chmod(path, stat.S_IRWXU)
        func(path)

    for tmp_dir in (TMP, TMP_STATE_TREE, TMP_PRODENV_STATE_TREE):
        if not os.path.isdir(tmp_dir):
            continue
        shutil.rmtree(tmp_dir, onerror=_force_remove)
|
2012-02-20 12:18:13 +00:00
|
|
|
|
2012-11-09 15:25:16 +00:00
|
|
|
def wait_for_jid(self, targets, jid, timeout=120):
    '''
    Block until job *jid* is no longer reported as running on any of
    *targets*, or until *timeout* seconds elapse.

    Returns True once the job has been seen as "not running" on two
    consecutive polls (debounced to avoid false positives), False on
    timeout.
    '''
    time.sleep(1)  # Allow some time for minions to accept jobs
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    job_finished = False
    while now <= expire:
        running = self.__client_job_running(targets, jid)
        # Clear the current status line before rewriting it.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        if not running and job_finished is False:
            # Let's not have false positives and wait one more seconds
            job_finished = True
        elif not running and job_finished is True:
            # Second consecutive "not running" poll: really done.
            return True
        elif running and job_finished is True:
            # Job showed up again; reset the debounce.
            job_finished = False

        if job_finished is False:
            sys.stdout.write(
                ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(running),
                    **self.colors
                )
            )
            sys.stdout.flush()
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # while/else: only reached when the loop expires without returning.
        sys.stdout.write(
            '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
            'back\n'.format(**self.colors)
        )
        sys.stdout.flush()
    return False
|
|
|
|
|
|
|
|
def __client_job_running(self, targets, jid):
    '''
    Return the subset of *targets* still reporting *jid* as running.
    '''
    status = self.client.cmd(
        list(targets), 'saltutil.running', tgt_type='list'
    )
    still_running = []
    for minion_id, jobs in six.iteritems(status):
        if jobs and jobs[0]['jid'] == jid:
            still_running.append(minion_id)
    return still_running
|
|
|
|
|
2012-11-26 05:44:18 +00:00
|
|
|
def wait_for_minion_connections(self, targets, timeout):
    '''
    Block until every minion in *targets* answers a test.ping, or
    raise SystemExit after *timeout* seconds.
    '''
    salt.utils.appendproctitle('WaitForMinionConnections')
    sys.stdout.write(
        ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
        'connect back\n'.format(
            (timeout > 60 and
             timedelta(seconds=timeout) or
             '{0} secs'.format(timeout)),
            ', '.join(targets),
            **self.colors
        )
    )
    sys.stdout.flush()
    expected_connections = set(targets)
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    while now <= expire:
        # Clear and rewrite the status line.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        sys.stdout.write(
            ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                '{0}'.format(expire - now).rsplit('.', 1)[0],
                ', '.join(expected_connections),
                **self.colors
            )
        )
        sys.stdout.flush()

        try:
            responses = self.client.cmd(
                list(expected_connections), 'test.ping', tgt_type='list',
            )
        # we'll get this exception if the master process hasn't finished starting yet
        except SaltClientError:
            time.sleep(0.1)
            now = datetime.now()
            continue
        for target in responses:
            if target not in expected_connections:
                # Someone(minion) else "listening"?
                continue
            expected_connections.remove(target)
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns',
                                  PNUM)
                )
            )
            sys.stdout.write(
                ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
                    target, **self.colors
                )
            )
            sys.stdout.flush()

        if not expected_connections:
            # Every expected minion answered.
            return

        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # while/else: only reached on timeout without an early return.
        print(
            '\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect '
            'back. Tests requiring them WILL fail'.format(**self.colors)
        )
        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('=', sep='=', inline=True)
        raise SystemExit()
|
2012-11-06 11:20:06 +00:00
|
|
|
|
2014-01-28 12:40:03 +00:00
|
|
|
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
    '''
    Run saltutil.sync_<modules_kind> on *targets* and wait until every
    target has reported back.

    Returns True when all targets synced, False when any target
    returned an error string.  Raises SystemExit if the sync job never
    finishes within *timeout* (default 120) seconds.
    '''
    if not timeout:
        timeout = 120
    # Let's sync all connected minions
    print(
        ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
        '(saltutil.sync_{1})'.format(
            ', '.join(targets),
            modules_kind,
            **self.colors
        )
    )
    syncing = set(targets)
    jid_info = self.client.run_job(
        list(targets), 'saltutil.sync_{0}'.format(modules_kind),
        tgt_type='list',
        timeout=999999999999999,
    )

    if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
        print(
            ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
            'Tests requiring these {0} WILL fail'.format(
                modules_kind, **self.colors)
        )
        raise SystemExit()

    # Drain returns until every target has been accounted for.
    while syncing:
        rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
        if rdata:
            for name, output in six.iteritems(rdata):
                if not output['ret']:
                    # Already synced!?
                    syncing.remove(name)
                    continue

                # A string return means the module raised an error.
                if isinstance(output['ret'], six.string_types):
                    # An errors has occurred
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                        '{1}'.format(
                            name, output['ret'],
                            modules_kind,
                            **self.colors)
                    )
                    return False

                print(
                    ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                    '{1}'.format(
                        name,
                        ', '.join(output['ret']),
                        modules_kind, **self.colors
                    )
                )
                # Synced!
                try:
                    syncing.remove(name)
                except KeyError:
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                        '{1}'.format(name, output, **self.colors)
                    )
    return True
|
2012-11-06 11:20:06 +00:00
|
|
|
|
2014-01-28 12:40:03 +00:00
|
|
|
def sync_minion_states(self, targets, timeout=None):
    '''
    Push state modules out to *targets* (saltutil.sync_states).
    '''
    salt.utils.appendproctitle('SyncMinionStates')
    self.sync_minion_modules_('states', targets, timeout=timeout)
|
|
|
|
|
|
|
|
def sync_minion_modules(self, targets, timeout=None):
    '''
    Push execution modules out to *targets* (saltutil.sync_modules).
    '''
    salt.utils.appendproctitle('SyncMinionModules')
    self.sync_minion_modules_('modules', targets, timeout=timeout)
|
|
|
|
|
2015-10-31 19:43:17 +00:00
|
|
|
def sync_minion_grains(self, targets, timeout=None):
    '''
    Push grains modules out to *targets* (saltutil.sync_grains).
    '''
    salt.utils.appendproctitle('SyncMinionGrains')
    self.sync_minion_modules_('grains', targets, timeout=timeout)
|
|
|
|
|
2012-05-28 03:00:10 +00:00
|
|
|
|
2013-08-30 17:24:15 +00:00
|
|
|
class AdaptedConfigurationTestCaseMixIn(object):
|
|
|
|
|
|
|
|
__slots__ = ()
|
|
|
|
|
2017-04-01 13:21:19 +00:00
|
|
|
def get_temp_config(self, config_for, **config_overrides):
    '''
    Build a scratch configuration for *config_for* rooted in a fresh
    temporary directory (removed on test cleanup), apply
    *config_overrides* on top of a from-scratch config, create the
    directory layout the daemon expects, and return the resulting
    options dictionary.
    '''
    rootdir = tempfile.mkdtemp(dir=TMP)
    self.addCleanup(shutil.rmtree, rootdir)
    # Default these to relative names; salt.config prefixes them with
    # root_dir when applying the configuration.
    for key in ('cachedir', 'pki_dir', 'sock_dir'):
        if key not in config_overrides:
            config_overrides[key] = key
    if 'log_file' not in config_overrides:
        config_overrides['log_file'] = 'logs/{}.log'.format(config_for)
    if 'user' not in config_overrides:
        config_overrides['user'] = RUNNING_TESTS_USER
    config_overrides['root_dir'] = rootdir

    cdict = self.get_config(config_for, from_scratch=True)

    # NOTE(review): a config_for value other than 'master',
    # 'client_config' or 'minion' leaves rdict unbound (NameError
    # below) -- presumably callers only pass these three; confirm.
    if config_for in ('master', 'client_config'):
        rdict = salt.config.apply_master_config(config_overrides, cdict)
    if config_for == 'minion':
        rdict = salt.config.apply_minion_config(config_overrides, cdict)

    # Pre-create every directory the daemon expects to exist.
    verify_env([os.path.join(rdict['pki_dir'], 'minions'),
                os.path.join(rdict['pki_dir'], 'minions_pre'),
                os.path.join(rdict['pki_dir'], 'minions_rejected'),
                os.path.join(rdict['pki_dir'], 'minions_denied'),
                os.path.join(rdict['cachedir'], 'jobs'),
                os.path.join(rdict['cachedir'], 'raet'),
                os.path.join(rdict['cachedir'], 'tokens'),
                os.path.join(rdict['root_dir'], 'cache', 'tokens'),
                os.path.join(rdict['pki_dir'], 'accepted'),
                os.path.join(rdict['pki_dir'], 'rejected'),
                os.path.join(rdict['pki_dir'], 'pending'),
                os.path.dirname(rdict['log_file']),
                rdict['sock_dir'],
                ],
               RUNNING_TESTS_USER)
    return rdict
|
|
|
|
|
2014-10-07 11:19:58 +00:00
|
|
|
def get_config(self, config_for, from_scratch=False):
|
|
|
|
if from_scratch:
|
|
|
|
if config_for in ('master', 'syndic_master'):
|
|
|
|
return salt.config.master_config(self.get_config_file_path(config_for))
|
|
|
|
elif config_for in ('minion', 'sub_minion'):
|
|
|
|
return salt.config.minion_config(self.get_config_file_path(config_for))
|
|
|
|
elif config_for in ('syndic',):
|
|
|
|
return salt.config.syndic_config(
|
|
|
|
self.get_config_file_path(config_for),
|
|
|
|
self.get_config_file_path('minion')
|
|
|
|
)
|
|
|
|
elif config_for == 'client_config':
|
|
|
|
return salt.config.client_config(self.get_config_file_path('master'))
|
|
|
|
|
2014-10-06 18:59:42 +00:00
|
|
|
if config_for not in RUNTIME_CONFIGS:
|
2014-10-06 19:05:46 +00:00
|
|
|
if config_for in ('master', 'syndic_master'):
|
2014-10-06 18:59:42 +00:00
|
|
|
RUNTIME_CONFIGS[config_for] = freeze(
|
|
|
|
salt.config.master_config(self.get_config_file_path(config_for))
|
|
|
|
)
|
|
|
|
elif config_for in ('minion', 'sub_minion'):
|
|
|
|
RUNTIME_CONFIGS[config_for] = freeze(
|
|
|
|
salt.config.minion_config(self.get_config_file_path(config_for))
|
|
|
|
)
|
2014-10-06 19:05:46 +00:00
|
|
|
elif config_for in ('syndic',):
|
|
|
|
RUNTIME_CONFIGS[config_for] = freeze(
|
2014-10-06 19:09:43 +00:00
|
|
|
salt.config.syndic_config(
|
|
|
|
self.get_config_file_path(config_for),
|
|
|
|
self.get_config_file_path('minion')
|
|
|
|
)
|
2014-10-06 19:05:46 +00:00
|
|
|
)
|
2014-10-07 11:19:58 +00:00
|
|
|
elif config_for == 'client_config':
|
|
|
|
RUNTIME_CONFIGS[config_for] = freeze(
|
|
|
|
salt.config.client_config(self.get_config_file_path('master'))
|
|
|
|
)
|
2014-10-06 18:59:42 +00:00
|
|
|
return RUNTIME_CONFIGS[config_for]
|
|
|
|
|
2013-08-30 17:24:15 +00:00
|
|
|
def get_config_dir(self):
|
|
|
|
return TMP_CONF_DIR
|
|
|
|
|
|
|
|
def get_config_file_path(self, filename):
|
2016-05-06 18:49:08 +00:00
|
|
|
if filename == 'syndic_master':
|
2016-05-12 12:30:16 +00:00
|
|
|
return os.path.join(TMP_SYNDIC_MASTER_CONF_DIR, 'master')
|
|
|
|
if filename == 'syndic':
|
2016-05-20 13:45:09 +00:00
|
|
|
return os.path.join(TMP_SYNDIC_MINION_CONF_DIR, 'minion')
|
2016-05-06 18:49:08 +00:00
|
|
|
if filename == 'sub_minion':
|
2016-05-12 12:30:16 +00:00
|
|
|
return os.path.join(TMP_SUB_MINION_CONF_DIR, 'minion')
|
2014-06-12 23:59:55 +00:00
|
|
|
return os.path.join(TMP_CONF_DIR, filename)
|
2013-08-30 17:24:15 +00:00
|
|
|
|
2014-07-09 16:14:23 +00:00
|
|
|
@property
|
|
|
|
def master_opts(self):
|
|
|
|
'''
|
2016-05-09 17:57:13 +00:00
|
|
|
Return the options used for the master
|
2014-07-09 16:14:23 +00:00
|
|
|
'''
|
2014-10-06 18:59:42 +00:00
|
|
|
return self.get_config('master')
|
2013-08-30 17:24:15 +00:00
|
|
|
|
2017-03-25 14:41:56 +00:00
|
|
|
@property
|
|
|
|
def minion_opts(self):
|
|
|
|
'''
|
|
|
|
Return the options used for the minion
|
|
|
|
'''
|
|
|
|
return self.get_config('minion')
|
|
|
|
|
|
|
|
@property
|
|
|
|
def sub_minion_opts(self):
|
|
|
|
'''
|
|
|
|
Return the options used for the sub_minion
|
|
|
|
'''
|
|
|
|
return self.get_config('sub_minion')
|
|
|
|
|
2014-07-15 00:41:16 +00:00
|
|
|
|
2016-05-19 19:41:23 +00:00
|
|
|
class SaltMinionEventAssertsMixIn(object):
    '''
    Asserts to verify that a given event was seen
    '''

    def __new__(cls, *args, **kwargs):
        # We have to cross-call to re-gen a config
        cls.q = multiprocessing.Queue()
        cls.fetch_proc = multiprocessing.Process(target=cls._fetch, args=(cls.q,))
        cls.fetch_proc.start()
        return object.__new__(cls)

    def __exit__(self, *args, **kwargs):
        # Wait for the event collector process to finish
        self.fetch_proc.join()

    @staticmethod
    def _fetch(q):
        '''
        Collect events and store them
        '''
        def _clean_queue():
            # Drain unread events so the queue's feeder thread does not
            # block interpreter shutdown. The previous implementation
            # called queue_item.task_done(), but task_done() is a
            # JoinableQueue method, not a method of the dequeued event
            # dict, and raised AttributeError at exit.
            print('Cleaning queue!')
            while not q.empty():
                q.get()

        atexit.register(_clean_queue)
        a_config = AdaptedConfigurationTestCaseMixIn()
        event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
        while True:
            try:
                events = event.get_event(full=False)
            except Exception:
                # This is broad but we'll see all kinds of issues right now
                # if we drop the proc out from under the socket while we're
                # reading. Use 'continue' rather than falling through:
                # otherwise 'events' would be unbound on a first-iteration
                # failure (NameError) or stale on the q.put() below.
                continue
            q.put(events)

    def assertMinionEventFired(self, tag):
        #TODO
        raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')

    def assertMinionEventReceived(self, desired_event):
        '''
        Fail unless ``desired_event`` (compared with its ``_stamp`` removed)
        shows up on the collector queue within ~2.5 seconds.
        '''
        queue_wait = 5  # 2.5s
        while self.q.empty():
            time.sleep(0.5)  # Wait for events to be pushed into the queue
            queue_wait -= 1
            if queue_wait <= 0:
                raise AssertionError('Queue wait timer expired')
        while not self.q.empty():  # This is not thread-safe and may be inaccurate
            event = self.q.get()
            if isinstance(event, dict):
                # Drop the timestamp so callers compare payloads only; use a
                # default so events without '_stamp' do not raise KeyError.
                event.pop('_stamp', None)
            if desired_event == event:
                self.fetch_proc.terminate()
                return True
        self.fetch_proc.terminate()
        raise AssertionError('Event {0} was not received by minion'.format(desired_event))
|
|
|
|
|
|
|
|
|
2013-08-30 17:24:15 +00:00
|
|
|
class SaltClientTestCaseMixIn(AdaptedConfigurationTestCaseMixIn):
    '''
    Mix-in exposing a lazily-created, process-wide cached LocalClient.
    '''

    # Logical name of the configuration the client is built from.
    _salt_client_config_file_name_ = 'master'
    __slots__ = ()

    @property
    def client(self):
        '''
        Return the shared LocalClient, creating it on first access.
        '''
        cache_key = 'runtime_client'
        if cache_key not in RUNTIME_CONFIGS:
            mopts = self.get_config(
                self._salt_client_config_file_name_, from_scratch=True
            )
            RUNTIME_CONFIGS[cache_key] = salt.client.get_local_client(mopts=mopts)
        return RUNTIME_CONFIGS[cache_key]
|
2012-12-04 11:51:37 +00:00
|
|
|
|
|
|
|
|
|
|
|
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a module function
    '''

    def minion_run(self, _function, *args, **kw):
        '''
        Run a single salt function on the 'minion' target and condition
        the return down to match the behavior of the raw function call
        '''
        return self.run_function(_function, args, **kw)

    def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
                     **kwargs):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        # Functions whose successful raw call legitimately returns None
        know_to_return_none = (
            'file.chown', 'file.chgrp', 'ssh.recv_known_host'
        )
        # 'f_arg'/'f_timeout' let callers forward 'arg'/'timeout' to the
        # remote function instead of having them consumed by this wrapper.
        for alias, real_name in (('f_arg', 'arg'), ('f_timeout', 'timeout')):
            if alias in kwargs:
                kwargs[real_name] = kwargs.pop(alias)
        orig = self.client.cmd(
            minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
        )

        if minion_tgt not in orig:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion \'{0}\'. Command output: {1}'.format(
                    minion_tgt, orig
                )
            )
        elif orig[minion_tgt] is None and function not in know_to_return_none:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
                'the minion \'{1}\'. Command output: {2}'.format(
                    function, minion_tgt, orig
                )
            )

        # Try to match stalled state functions
        orig[minion_tgt] = self._check_state_return(orig[minion_tgt])

        return orig[minion_tgt]

    def run_state(self, function, **kwargs):
        '''
        Run the state.single command and return the state return structure
        '''
        single_ret = self.run_function('state.single', [function], **kwargs)
        return self._check_state_return(single_ret)

    def _check_state_return(self, ret):
        '''
        Inspect a state call return; when it reports a stalled state
        function, kill the offending job and annotate the return.
        '''
        if isinstance(ret, dict):
            # This is the supposed return format for state calls
            return ret

        if not isinstance(ret, list):
            # Unknown return shape; hand it back untouched
            return ret

        # Lists of strings are usually errors
        seen_jids = []
        for item in ret[:]:
            if not isinstance(item, six.string_types):
                # We don't know how to handle this
                continue
            match = STATE_FUNCTION_RUNNING_RE.match(item)
            if not match:
                # We don't know how to handle this
                continue
            jid = match.group('jid')
            if jid in seen_jids:
                continue

            seen_jids.append(jid)

            job_data = self.run_function('saltutil.find_job', [jid])
            job_kill = self.run_function('saltutil.kill_job', [jid])
            msg = (
                'A running state.single was found causing a state lock. '
                'Job details: \'{0}\' Killing Job Returned: \'{1}\''.format(
                    job_data, job_kill
                )
            )
            ret.append('[TEST SUITE ENFORCED]{0}'
                       '[/TEST SUITE ENFORCED]'.format(msg))
        return ret
|
|
|
|
|
2012-05-29 16:40:20 +00:00
|
|
|
|
2012-12-04 11:51:37 +00:00
|
|
|
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a syndic based execution test
    '''
    # The syndic test client is built from the syndic master configuration.
    _salt_client_config_file_name_ = 'syndic_master'

    def run_function(self, function, arg=()):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        target = 'minion'
        reply = self.client.cmd(target, function, arg, timeout=25)
        if target not in reply:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion. Command output: {0}'.format(reply)
            )
        return reply[target]
|
2012-04-21 22:58:03 +00:00
|
|
|
|
2012-05-13 02:40:28 +00:00
|
|
|
|
2016-05-12 12:03:37 +00:00
|
|
|
class ShellCase(AdaptedConfigurationTestCaseMixIn, ShellTestCase, ScriptPathMixin):
|
2012-04-21 22:58:03 +00:00
|
|
|
'''
|
|
|
|
Execute a test for a shell command
|
|
|
|
'''
|
2012-11-18 23:49:37 +00:00
|
|
|
|
2013-06-25 13:13:14 +00:00
|
|
|
_code_dir_ = CODE_DIR
|
|
|
|
_script_dir_ = SCRIPT_DIR
|
|
|
|
_python_executable_ = PYEXEC
|
2012-04-21 23:27:59 +00:00
|
|
|
|
2015-07-28 11:07:04 +00:00
|
|
|
def chdir(self, dirname):
|
|
|
|
try:
|
|
|
|
os.chdir(dirname)
|
|
|
|
except OSError:
|
|
|
|
os.chdir(INTEGRATION_TEST_DIR)
|
|
|
|
|
2017-01-23 12:10:22 +00:00
|
|
|
def run_salt(self, arg_str, with_retcode=False, catch_stderr=False, timeout=60): # pylint: disable=W0221
|
2012-04-21 23:52:29 +00:00
|
|
|
'''
|
2012-05-23 14:14:16 +00:00
|
|
|
Execute salt
|
2012-04-21 23:52:29 +00:00
|
|
|
'''
|
2013-08-30 17:24:15 +00:00
|
|
|
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
|
2016-08-02 22:30:09 +00:00
|
|
|
return self.run_script('salt', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout)
|
2012-04-21 23:52:29 +00:00
|
|
|
|
2017-01-23 12:10:22 +00:00
|
|
|
def run_ssh(self, arg_str, with_retcode=False, catch_stderr=False, timeout=60): # pylint: disable=W0221
|
2014-05-09 21:54:21 +00:00
|
|
|
'''
|
|
|
|
Execute salt-ssh
|
|
|
|
'''
|
2016-08-31 11:42:27 +00:00
|
|
|
arg_str = '-ldebug -W -c {0} -i --priv {1} --roster-file {2} --out=json localhost {3}'.format(self.get_config_dir(), os.path.join(TMP_CONF_DIR, 'key_test'), os.path.join(TMP_CONF_DIR, 'roster'), arg_str)
|
2016-07-27 16:40:02 +00:00
|
|
|
return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=timeout, raw=True)
|
2014-05-09 21:54:21 +00:00
|
|
|
|
2016-06-29 16:49:14 +00:00
|
|
|
def run_run(self, arg_str, with_retcode=False, catch_stderr=False, async=False, timeout=60, config_dir=None):
|
2012-04-21 23:52:29 +00:00
|
|
|
'''
|
2012-05-23 14:14:16 +00:00
|
|
|
Execute salt-run
|
2012-04-21 23:52:29 +00:00
|
|
|
'''
|
2016-06-29 16:49:14 +00:00
|
|
|
arg_str = '-c {0}{async_flag} -t {timeout} {1}'.format(config_dir or self.get_config_dir(),
|
2015-01-09 20:26:25 +00:00
|
|
|
arg_str,
|
2015-01-26 17:59:55 +00:00
|
|
|
timeout=timeout,
|
2015-01-09 20:26:25 +00:00
|
|
|
async_flag=' --async' if async else '')
|
2017-01-23 12:10:22 +00:00
|
|
|
return self.run_script('salt-run', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=60)
|
2012-04-21 23:52:29 +00:00
|
|
|
|
2016-09-01 16:32:23 +00:00
|
|
|
def run_run_plus(self, fun, *arg, **kwargs):
|
2012-05-28 03:00:10 +00:00
|
|
|
'''
|
2016-09-01 16:39:54 +00:00
|
|
|
Execute the runner function and return the return data and output in a dict
|
2012-05-28 03:00:10 +00:00
|
|
|
'''
|
2016-08-31 03:57:06 +00:00
|
|
|
ret = {'fun': fun}
|
2016-09-01 15:20:18 +00:00
|
|
|
from_scratch = bool(kwargs.pop('__reload_config', False))
|
2016-08-31 03:57:06 +00:00
|
|
|
# Have to create an empty dict and then update it, as the result from
|
2016-09-01 15:20:18 +00:00
|
|
|
# self.get_config() is an ImmutableDict which cannot be updated.
|
2014-10-06 18:59:42 +00:00
|
|
|
opts = {}
|
2016-09-01 00:43:22 +00:00
|
|
|
opts.update(self.get_config('client_config', from_scratch=from_scratch))
|
2016-08-31 03:57:06 +00:00
|
|
|
opts_arg = list(arg)
|
|
|
|
if kwargs:
|
|
|
|
opts_arg.append({'__kwarg__': True})
|
|
|
|
opts_arg[-1].update(kwargs)
|
|
|
|
opts.update({'doc': False, 'fun': fun, 'arg': opts_arg})
|
2012-11-23 12:19:09 +00:00
|
|
|
with RedirectStdStreams():
|
|
|
|
runner = salt.runner.Runner(opts)
|
2016-08-31 03:57:06 +00:00
|
|
|
ret['return'] = runner.run()
|
|
|
|
try:
|
|
|
|
ret['jid'] = runner.jid
|
|
|
|
except AttributeError:
|
|
|
|
ret['jid'] = None
|
2016-09-01 15:20:18 +00:00
|
|
|
|
|
|
|
# Compile output
|
|
|
|
# TODO: Support outputters other than nested
|
|
|
|
opts['color'] = False
|
|
|
|
opts['output_file'] = cStringIO()
|
|
|
|
try:
|
|
|
|
salt.output.display_output(ret['return'], opts=opts)
|
|
|
|
ret['out'] = opts['output_file'].getvalue().splitlines()
|
|
|
|
finally:
|
|
|
|
opts['output_file'].close()
|
|
|
|
|
2012-05-28 03:00:10 +00:00
|
|
|
return ret
|
|
|
|
|
2013-09-13 16:51:25 +00:00
|
|
|
def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
|
2012-04-21 23:27:59 +00:00
|
|
|
'''
|
|
|
|
Execute salt-key
|
|
|
|
'''
|
2013-08-30 17:24:15 +00:00
|
|
|
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
|
2013-09-13 16:51:25 +00:00
|
|
|
return self.run_script(
|
|
|
|
'salt-key',
|
|
|
|
arg_str,
|
|
|
|
catch_stderr=catch_stderr,
|
2016-07-26 16:54:45 +00:00
|
|
|
with_retcode=with_retcode,
|
2017-01-23 12:10:22 +00:00
|
|
|
timeout=60
|
2013-09-13 16:51:25 +00:00
|
|
|
)
|
2012-08-04 21:28:51 +00:00
|
|
|
|
2014-04-24 14:42:17 +00:00
|
|
|
def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
|
2012-08-04 21:28:51 +00:00
|
|
|
'''
|
|
|
|
Execute salt-cp
|
|
|
|
'''
|
2013-08-30 17:24:15 +00:00
|
|
|
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
|
2017-01-23 12:10:22 +00:00
|
|
|
return self.run_script('salt-cp', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=60)
|
2012-08-13 06:10:42 +00:00
|
|
|
|
2014-04-24 14:42:17 +00:00
|
|
|
def run_call(self, arg_str, with_retcode=False, catch_stderr=False):
|
2016-03-04 19:13:27 +00:00
|
|
|
'''
|
|
|
|
Execute salt-call.
|
|
|
|
'''
|
2013-08-30 17:24:15 +00:00
|
|
|
arg_str = '--config-dir {0} {1}'.format(self.get_config_dir(), arg_str)
|
2017-01-23 12:10:22 +00:00
|
|
|
return self.run_script('salt-call', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, timeout=60)
|
2012-08-04 18:58:32 +00:00
|
|
|
|
2017-01-23 12:10:22 +00:00
|
|
|
def run_cloud(self, arg_str, catch_stderr=False, timeout=30):
|
2013-11-28 17:55:58 +00:00
|
|
|
'''
|
|
|
|
Execute salt-cloud
|
|
|
|
'''
|
2013-11-28 18:30:50 +00:00
|
|
|
arg_str = '-c {0} {1}'.format(self.get_config_dir(), arg_str)
|
2016-10-25 19:30:41 +00:00
|
|
|
return self.run_script('salt-cloud', arg_str, catch_stderr,
|
|
|
|
timeout=timeout)
|
2013-11-28 17:55:58 +00:00
|
|
|
|
2012-08-04 18:58:32 +00:00
|
|
|
|
2014-12-13 00:11:41 +00:00
|
|
|
@requires_sshd_server
class SSHCase(ShellCase):
    '''
    Execute a command via salt-ssh
    '''
    def _arg_str(self, function, arg):
        # Build the "<function> <arg1> <arg2> ..." command string
        return '{0} {1}'.format(function, ' '.join(arg))

    def run_function(self, function, arg=(), timeout=90, **kwargs):
        '''
        We use a 90s timeout here, which some slower systems do end up needing
        '''
        raw_output = self.run_ssh(self._arg_str(function, arg), timeout=timeout)
        try:
            # salt-ssh is invoked with --out=json; pull the local result out
            return json.loads(raw_output)['localhost']
        except Exception:
            # Unparseable output (errors, tracebacks): return it verbatim
            return raw_output
|
|
|
|
|
|
|
|
|
2012-11-20 16:09:57 +00:00
|
|
|
class SaltReturnAssertsMixIn(object):
|
|
|
|
|
2012-11-25 22:29:08 +00:00
|
|
|
def assertReturnSaltType(self, ret):
|
2012-11-20 16:09:57 +00:00
|
|
|
try:
|
2012-11-21 12:16:17 +00:00
|
|
|
self.assertTrue(isinstance(ret, dict))
|
2012-11-20 16:09:57 +00:00
|
|
|
except AssertionError:
|
|
|
|
raise AssertionError(
|
2012-11-21 12:16:17 +00:00
|
|
|
'{0} is not dict. Salt returned: {1}'.format(
|
2012-11-25 22:29:08 +00:00
|
|
|
type(ret).__name__, ret
|
2012-11-20 16:09:57 +00:00
|
|
|
)
|
|
|
|
)
|
|
|
|
|
2012-11-25 22:29:08 +00:00
|
|
|
def assertReturnNonEmptySaltType(self, ret):
|
|
|
|
self.assertReturnSaltType(ret)
|
2012-11-21 12:16:17 +00:00
|
|
|
try:
|
|
|
|
self.assertNotEqual(ret, {})
|
|
|
|
except AssertionError:
|
|
|
|
raise AssertionError(
|
|
|
|
'{} is equal to {}. Salt returned an empty dictionary.'
|
|
|
|
)
|
|
|
|
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
def __return_valid_keys(self, keys):
|
2012-12-12 17:11:44 +00:00
|
|
|
if isinstance(keys, tuple):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
# If it's a tuple, turn it into a list
|
2012-12-07 12:46:02 +00:00
|
|
|
keys = list(keys)
|
2014-11-21 19:41:22 +00:00
|
|
|
elif isinstance(keys, six.string_types):
|
|
|
|
# If it's a string, make it a one item list
|
2012-12-07 12:46:02 +00:00
|
|
|
keys = [keys]
|
2012-12-12 17:11:44 +00:00
|
|
|
elif not isinstance(keys, list):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
# If we've reached here, it's a bad type passed to keys
|
2012-12-07 12:46:02 +00:00
|
|
|
raise RuntimeError('The passed keys need to be a list')
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
return keys
|
2012-12-07 12:46:02 +00:00
|
|
|
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
    def __getWithinSaltReturn(self, ret, keys):
        '''
        Drill into a salt return dictionary and fetch the value found under
        the (possibly nested) ``keys`` path inside each minion's part of the
        return. Raises AssertionError when the path cannot be resolved.

        NOTE(review): ``okeys`` is consumed while traversing the first value
        of ``ret``; with a multi-minion return a later iteration would pop
        from an empty list. In practice ``ret`` holds a single minion key —
        confirm before relying on multi-minion returns.
        '''
        self.assertReturnNonEmptySaltType(ret)
        keys = self.__return_valid_keys(keys)
        # Work on a copy so the original key list survives for error messages
        okeys = keys[:]
        for part in six.itervalues(ret):
            try:
                # First hop into this minion's portion of the return
                ret_item = part[okeys.pop(0)]
            except (KeyError, TypeError):
                raise AssertionError(
                    'Could not get ret{0} from salt\'s return: {1}'.format(
                        ''.join(['[\'{0}\']'.format(k) for k in keys]), part
                    )
                )
            while okeys:
                try:
                    # Follow the remaining keys down the nested structure
                    ret_item = ret_item[okeys.pop(0)]
                except (KeyError, TypeError):
                    raise AssertionError(
                        'Could not get ret{0} from salt\'s return: {1}'.format(
                            ''.join(['[\'{0}\']'.format(k) for k in keys]), part
                        )
                    )
        return ret_item
|
|
|
|
|
2012-12-12 17:11:44 +00:00
|
|
|
def assertSaltTrueReturn(self, ret):
|
|
|
|
try:
|
|
|
|
self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
|
|
|
|
except AssertionError:
|
2013-04-25 19:59:34 +00:00
|
|
|
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
|
2013-01-14 14:07:58 +00:00
|
|
|
try:
|
|
|
|
raise AssertionError(
|
|
|
|
'{result} is not True. Salt Comment:\n{comment}'.format(
|
2015-04-08 18:43:55 +00:00
|
|
|
**(next(six.itervalues(ret)))
|
2013-01-14 14:07:58 +00:00
|
|
|
)
|
|
|
|
)
|
2013-04-25 19:59:34 +00:00
|
|
|
except (AttributeError, IndexError):
|
2013-01-14 14:07:58 +00:00
|
|
|
raise AssertionError(
|
2013-04-25 19:59:34 +00:00
|
|
|
'Failed to get result. Salt Returned:\n{0}'.format(
|
|
|
|
pprint.pformat(ret)
|
|
|
|
)
|
2012-12-12 17:11:44 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
def assertSaltFalseReturn(self, ret):
|
|
|
|
try:
|
|
|
|
self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
|
|
|
|
except AssertionError:
|
2013-04-25 19:59:34 +00:00
|
|
|
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
|
2013-01-14 14:07:58 +00:00
|
|
|
try:
|
|
|
|
raise AssertionError(
|
|
|
|
'{result} is not False. Salt Comment:\n{comment}'.format(
|
2015-04-08 18:43:55 +00:00
|
|
|
**(next(six.itervalues(ret)))
|
2013-01-14 14:07:58 +00:00
|
|
|
)
|
|
|
|
)
|
2013-04-25 19:59:34 +00:00
|
|
|
except (AttributeError, IndexError):
|
2013-01-14 14:07:58 +00:00
|
|
|
raise AssertionError(
|
|
|
|
'Failed to get result. Salt Returned: {0}'.format(ret)
|
2012-12-12 17:11:44 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
def assertSaltNoneReturn(self, ret):
|
|
|
|
try:
|
|
|
|
self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
|
|
|
|
except AssertionError:
|
2013-04-25 19:59:34 +00:00
|
|
|
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
|
2013-01-14 14:07:58 +00:00
|
|
|
try:
|
|
|
|
raise AssertionError(
|
|
|
|
'{result} is not None. Salt Comment:\n{comment}'.format(
|
2015-04-08 18:43:55 +00:00
|
|
|
**(next(six.itervalues(ret)))
|
2013-01-14 14:07:58 +00:00
|
|
|
)
|
|
|
|
)
|
2013-04-25 19:59:34 +00:00
|
|
|
except (AttributeError, IndexError):
|
2013-01-14 14:07:58 +00:00
|
|
|
raise AssertionError(
|
|
|
|
'Failed to get result. Salt Returned: {0}'.format(ret)
|
2012-12-12 17:11:44 +00:00
|
|
|
)
|
|
|
|
|
2013-08-18 04:46:33 +00:00
|
|
|
def assertInSaltComment(self, in_comment, ret):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
return self.assertIn(
|
|
|
|
in_comment, self.__getWithinSaltReturn(ret, 'comment')
|
|
|
|
)
|
|
|
|
|
2013-08-18 04:49:18 +00:00
|
|
|
def assertNotInSaltComment(self, not_in_comment, ret):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
return self.assertNotIn(
|
|
|
|
not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
|
|
|
|
)
|
|
|
|
|
|
|
|
def assertSaltCommentRegexpMatches(self, ret, pattern):
|
|
|
|
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
|
|
|
|
|
2014-12-10 20:03:28 +00:00
|
|
|
def assertInSaltStateWarning(self, in_comment, ret):
|
2013-07-27 14:58:28 +00:00
|
|
|
return self.assertIn(
|
|
|
|
in_comment, self.__getWithinSaltReturn(ret, 'warnings')
|
|
|
|
)
|
|
|
|
|
|
|
|
def assertNotInSaltStateWarning(self, not_in_comment, ret):
|
|
|
|
return self.assertNotIn(
|
|
|
|
not_in_comment, self.__getWithinSaltReturn(ret, 'warnings')
|
|
|
|
)
|
|
|
|
|
2013-08-18 05:01:38 +00:00
|
|
|
def assertInSaltReturn(self, item_to_check, ret, keys):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
return self.assertIn(
|
|
|
|
item_to_check, self.__getWithinSaltReturn(ret, keys)
|
|
|
|
)
|
|
|
|
|
2013-08-18 04:58:42 +00:00
|
|
|
def assertNotInSaltReturn(self, item_to_check, ret, keys):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
return self.assertNotIn(
|
|
|
|
item_to_check, self.__getWithinSaltReturn(ret, keys)
|
|
|
|
)
|
|
|
|
|
|
|
|
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
|
2017-03-31 11:22:33 +00:00
|
|
|
return self.assertRegex(
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
self.__getWithinSaltReturn(ret, keys), pattern
|
|
|
|
)
|
2012-12-07 12:46:02 +00:00
|
|
|
|
|
|
|
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
keys = ['changes'] + self.__return_valid_keys(keys)
|
|
|
|
return self.assertEqual(
|
|
|
|
self.__getWithinSaltReturn(ret, keys), comparison
|
|
|
|
)
|
2012-12-07 12:46:02 +00:00
|
|
|
|
|
|
|
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
|
Abstract `SaltReturnAssertsMixIn` to allow more tests using it.
* Created a method which gets a deep keyed value in a salt state return. For example, allow getting, `ret['changes']['new']`.
* Created a method which tries to make sure the correct type of the passed keys for the above added method.
* Also added were the methods, `assertInSaltReturn`, `assertNotInSaltReturn`, `assertInSaltReturnRegexpMatches`, which simplified the existing `assertInSaltComment`, `assertNotInSaltComment`, `assertSaltCommentRegexpMatches`, `assertSaltStateChangesEqual` and `assertSaltStateChangesNotEqual`, also allowing to do similar tests in every key(deep or not) of a salt state return dictionary.
2012-12-12 13:29:43 +00:00
|
|
|
keys = ['changes'] + self.__return_valid_keys(keys)
|
|
|
|
return self.assertNotEqual(
|
|
|
|
self.__getWithinSaltReturn(ret, keys), comparison
|
|
|
|
)
|