yapf autoformat

Alexey Lavrenuke 2016-12-30 16:46:29 +03:00
parent 443bd5291d
commit 40706c55b8
75 changed files with 1701 additions and 1560 deletions

.style.yapf Normal file
View File

@@ -0,0 +1,7 @@
+[style]
+based_on_style = pep8
+COALESCE_BRACKETS = True
+COLUMN_LIMIT = 80
+DEDENT_CLOSING_BRACKETS = False
+SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
+SPLIT_BEFORE_FIRST_ARGUMENT = True
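The style file above is what yapf reads automatically when invoked from the repository root (e.g. yapf --in-place --recursive .), which is how the rest of this commit was produced. A minimal sketch of applying it programmatically; the sample source string is hypothetical, not taken from this repository:

    # Format a snippet using the project's .style.yapf.
    # Assumes yapf is installed and the style file is in the working directory.
    from yapf.yapflib.yapf_api import FormatCode

    source = "f(a = 1 ,b = 2 )\n"
    formatted, changed = FormatCode(source, style_config='.style.yapf')
    print(formatted)  # f(a=1, b=2)
    print(changed)    # True: yapf rewrote the snippet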

View File

@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='yandextank',
-    version='1.8.29-9',
+    version='1.8.29-10',
     description='a performance measurement tool',
     longer_description='''
 Yandex.Tank is a performance measurement and load testing automatization tool.

View File

@@ -27,8 +27,9 @@ class ApiWorker:
         file_handler = logging.FileHandler(self.log_filename)
         file_handler.setLevel(logging.DEBUG)
-        file_handler.setFormatter(logging.Formatter(
-            "%(asctime)s [%(levelname)s] %(name)s %(message)s"))
+        file_handler.setFormatter(
+            logging.Formatter(
+                "%(asctime)s [%(levelname)s] %(name)s %(message)s"))
         logger.addHandler(file_handler)
         console_handler = logging.StreamHandler(sys.stdout)
         stderr_hdl = logging.StreamHandler(sys.stderr)
@@ -65,8 +66,8 @@ class ApiWorker:
         """ Make preparations before running Tank """
         self.options = options
         if self.options.get('lock_dir', None):
-            self.core.set_option(self.core.SECTION, "lock_dir",
-                                 self.options['lock_dir'])
+            self.core.set_option(
+                self.core.SECTION, "lock_dir", self.options['lock_dir'])
 
         while True:
             try:
@@ -109,13 +110,14 @@ class ApiWorker:
         except KeyboardInterrupt as ex:
             self.log.info(
                 "Do not press Ctrl+C again, the test will be broken otherwise")
-            self.log.debug("Caught KeyboardInterrupt: %s",
-                           traceback.format_exc(ex))
+            self.log.debug(
+                "Caught KeyboardInterrupt: %s", traceback.format_exc(ex))
             try:
                 retcode = self.__graceful_shutdown()
             except KeyboardInterrupt as ex:
-                self.log.debug("Caught KeyboardInterrupt again: %s",
-                               traceback.format_exc(ex))
+                self.log.debug(
+                    "Caught KeyboardInterrupt again: %s",
+                    traceback.format_exc(ex))
                 self.log.info(
                     "User insists on exiting, aborting graceful shutdown...")
                 retcode = 1
@@ -138,12 +140,13 @@ class ApiWorker:
             for filename in conf_files:
                 if fnmatch.fnmatch(filename, '*.ini'):
                     configs += [
-                        os.path.realpath(self.baseconfigs_location + os.sep +
-                                         filename)
+                        os.path.realpath(
+                            self.baseconfigs_location + os.sep + filename)
                     ]
         except OSError:
-            self.log.warn(self.baseconfigs_location +
-                          ' is not accessible to get configs list')
+            self.log.warn(
+                self.baseconfigs_location +
+                ' is not accessible to get configs list')
         configs += [os.path.expanduser('~/.yandex-tank')]
         return configs

View File

@@ -76,8 +76,8 @@ class AbstractPlugin(object):
     def publish(self, key, value):
         """publish value to status"""
-        self.log.debug("Publishing status: %s/%s: %s", self.__class__.__name__,
-                       key, value)
+        self.log.debug(
+            "Publishing status: %s/%s: %s", self.__class__.__name__, key, value)
         self.core.publish(self.__class__.__name__, key, value)
 
     def close(self):
@@ -175,25 +175,20 @@ class AbstractCriterion(object):
 class GeneratorPlugin(object):
-    DEFAULT_INFO = {'address': '',
-                    'port': 80,
-                    'instances': 1,
-                    'ammo_file': '',
-                    'rps_schedule': [],
-                    'duration': 0,
-                    'loop_count': 0}
+    DEFAULT_INFO = {
+        'address': '',
+        'port': 80,
+        'instances': 1,
+        'ammo_file': '',
+        'rps_schedule': [],
+        'duration': 0,
+        'loop_count': 0
+    }
 
     class Info(object):
         def __init__(
-                self,
-                address,
-                port,
-                instances,
-                ammo_file,
-                rps_schedule,
-                duration,
-                loop_count):
+                self, address, port, instances, ammo_file, rps_schedule,
+                duration, loop_count):
             self.address = address
             self.port = port
             self.instances = instances

View File

@@ -62,8 +62,7 @@ class ResourceManager(object):
                 'Reading large resource to memory: %s. Size: %s bytes',
                 filename, size)
         except Exception as exc:
-            logger.debug('Unable to check resource size %s. %s', filename,
-                         exc)
+            logger.debug('Unable to check resource size %s. %s', filename, exc)
         with opener(filename, 'r') as resource:
             content = resource.read()
         return content
@@ -145,20 +144,17 @@ class HttpOpener(object):
     def open(self, *args, **kwargs):
         with closing(
                 requests.get(
-                    self.url,
-                    stream=True,
-                    verify=False,
-                    timeout=self.timeout
-                )
-        ) as stream:
+                    self.url, stream=True, verify=False,
+                    timeout=self.timeout)) as stream:
             stream_iterator = stream.raw.stream(100, decode_content=True)
             header = stream_iterator.next()
             fmt = self.fmt_detector.detect_format(header)
             logger.debug('Resource %s format detected: %s.', self.url, fmt)
             if not self.force_download and fmt != 'gzip' and self.data_length > 10**8:
                 logger.info(
-                    "Resource data is not gzipped and larger than 100MB. Reading from stream..")
+                    "Resource data is not gzipped and larger than 100MB. Reading from stream.."
+                )
                 return HttpStreamWrapper(self.url)
             else:
                 downloaded_f_path = self.download_file()
@@ -176,44 +172,47 @@ class HttpOpener(object):
                 "Resource %s has already been downloaded to %s . Using it..",
                 self.url, tmpfile_path)
         else:
-            logger.info("Downloading resource %s to %s", self.url,
-                        tmpfile_path)
+            logger.info("Downloading resource %s to %s", self.url, tmpfile_path)
             try:
                 data = requests.get(self.url, verify=False, timeout=10)
             except requests.exceptions.Timeout as exc:
-                raise RuntimeError('Connection timeout reached '
-                                   'trying to download resource: %s \n'
-                                   'via HttpOpener: %s' % (self.url, exc))
+                raise RuntimeError(
+                    'Connection timeout reached '
+                    'trying to download resource: %s \n'
+                    'via HttpOpener: %s' % (self.url, exc))
             f = open(tmpfile_path, "wb")
             f.write(data.content)
             f.close()
-            logger.info("Successfully downloaded resource %s to %s",
-                        self.url, tmpfile_path)
+            logger.info(
+                "Successfully downloaded resource %s to %s", self.url,
+                tmpfile_path)
         return tmpfile_path
 
     def get_request_info(self):
         logger.info('Trying to get info about resource %s', self.url)
-        req = requests.Request('HEAD',
-                               self.url,
-                               headers={'Accept-Encoding': 'identity'})
+        req = requests.Request(
+            'HEAD', self.url, headers={'Accept-Encoding': 'identity'})
         session = requests.Session()
         prepared = session.prepare_request(req)
         try:
-            self.data_info = session.send(prepared,
-                                          verify=False,
-                                          allow_redirects=True,
-                                          timeout=self.timeout)
-        except (requests.exceptions.Timeout,
+            self.data_info = session.send(
+                prepared,
+                verify=False,
+                allow_redirects=True,
+                timeout=self.timeout)
+        except (
+                requests.exceptions.Timeout,
                 requests.exceptions.ConnectionError) as exc:
             logger.warning(
                 'Connection error trying to get info about resource %s \n'
                 'Exception: %s \n'
                 'Retrying...' % (self.url, exc))
             try:
-                self.data_info = session.send(prepared,
-                                              verify=False,
-                                              allow_redirects=True,
-                                              timeout=self.timeout)
+                self.data_info = session.send(
+                    prepared,
+                    verify=False,
+                    allow_redirects=True,
+                    timeout=self.timeout)
             except Exception as exc:
                 logger.debug(
                     'Connection error trying to get info about resource %s \n'
@@ -228,12 +227,14 @@ class HttpOpener(object):
         except requests.exceptions.HTTPError as exc:
             if exc.response.status_code == 405:
                 logger.info(
-                    "Resource storage does not support HEAD method. Ignore proto error and force download file.")
+                    "Resource storage does not support HEAD method. Ignore proto error and force download file."
+                )
                 self.force_download = True
             else:
-                raise RuntimeError('Invalid HTTP response '
-                                   'trying to get info about resource: %s \n'
-                                   'via HttpOpener: %s' % (self.url, exc))
+                raise RuntimeError(
+                    'Invalid HTTP response '
+                    'trying to get info about resource: %s \n'
+                    'via HttpOpener: %s' % (self.url, exc))
 
     @property
     def get_filename(self):
@@ -262,14 +263,13 @@ class HttpStreamWrapper:
         self.pointer = 0
         self.stream_iterator = None
         self._content_consumed = False
-        self.chunk_size = 10 ** 3
+        self.chunk_size = 10**3
         try:
-            self.stream = requests.get(self.url,
-                                       stream=True,
-                                       verify=False,
-                                       timeout=10)
+            self.stream = requests.get(
+                self.url, stream=True, verify=False, timeout=10)
             self.stream_iterator = self.stream.iter_content(self.chunk_size)
-        except (requests.exceptions.Timeout,
+        except (
+                requests.exceptions.Timeout,
                 requests.exceptions.ConnectionError) as exc:
             raise RuntimeError(
                 'Connection errors or timeout reached '
@@ -278,9 +278,10 @@ class HttpStreamWrapper:
         try:
             self.stream.raise_for_status()
         except requests.exceptions.HTTPError as exc:
-            raise RuntimeError('Invalid HTTP response'
-                               'trying to open stream for resource: %s\n'
-                               'via HttpStreamWrapper: %s' % (self.url, exc))
+            raise RuntimeError(
+                'Invalid HTTP response'
+                'trying to open stream for resource: %s\n'
+                'via HttpStreamWrapper: %s' % (self.url, exc))
 
     def __enter__(self):
         return self
@@ -295,12 +296,11 @@ class HttpStreamWrapper:
     def _reopen_stream(self):
         self.stream.connection.close()
         try:
-            self.stream = requests.get(self.url,
-                                       stream=True,
-                                       verify=False,
-                                       timeout=30)
+            self.stream = requests.get(
+                self.url, stream=True, verify=False, timeout=30)
             self.stream_iterator = self.stream.iter_content(self.chunk_size)
-        except (requests.exceptions.Timeout,
+        except (
+                requests.exceptions.Timeout,
                 requests.exceptions.ConnectionError) as exc:
             raise RuntimeError(
                 'Connection errors or timeout reached '
@@ -309,9 +309,10 @@ class HttpStreamWrapper:
         try:
             self.stream.raise_for_status()
         except requests.exceptions.HTTPError as exc:
-            raise RuntimeError('Invalid HTTP response'
-                               'trying to reopen stream for resource: %s\n'
-                               'via HttpStreamWrapper: %s' % (self.url, exc))
+            raise RuntimeError(
+                'Invalid HTTP response'
+                'trying to reopen stream for resource: %s\n'
+                'via HttpStreamWrapper: %s' % (self.url, exc))
         self._content_consumed = False
 
     def _enhance_buffer(self):
@@ -334,7 +335,8 @@ class HttpStreamWrapper:
         while '\n' not in self.buffer:
             try:
                 self._enhance_buffer()
-            except (StopIteration, TypeError,
+            except (
+                    StopIteration, TypeError,
                     requests.exceptions.StreamConsumedError):
                 self._content_consumed = True
                 break
@@ -352,7 +354,8 @@ class HttpStreamWrapper:
         while len(self.buffer) < chunk_size:
             try:
                 self._enhance_buffer()
-            except (StopIteration, TypeError,
+            except (
+                    StopIteration, TypeError,
                     requests.exceptions.StreamConsumedError):
                 break
         if len(self.buffer) > chunk_size:

View File

@@ -3,7 +3,6 @@ from yandextank.common.util import Drain, Chopper
 class TestDrain(object):
-
     def test_run(self):
         """
         Test drain's run function (in a same thread)
@@ -38,7 +37,6 @@ class TestDrain(object):
 class TestChopper(object):
-
     def test_output(self):
         source = (range(i) for i in range(5))
         expected = [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]

View File

@@ -43,7 +43,6 @@ class Drain(th.Thread):
 class SecuredShell(object):
-
     def __init__(self, host, port, username, timeout):
         self.host = host
         self.port = port
@@ -52,20 +51,22 @@ class SecuredShell(object):
     def connect(self):
         logger.debug(
-            "Opening SSH connection to {host}:{port}".format(host=self.host,
-                                                             port=self.port))
+            "Opening SSH connection to {host}:{port}".format(
+                host=self.host, port=self.port))
         client = SSHClient()
         client.load_system_host_keys()
         client.set_missing_host_key_policy(AutoAddPolicy())
         try:
-            client.connect(self.host,
-                           port=self.port,
-                           username=self.username,
-                           timeout=self.timeout, )
+            client.connect(
+                self.host,
+                port=self.port,
+                username=self.username,
+                timeout=self.timeout, )
         except ValueError as e:
             logger.error(e)
-            logger.warning("""
+            logger.warning(
+                """
 Patching Crypto.Cipher.AES.new and making another attempt.
 See here for the details:
@@ -82,10 +83,11 @@ http://uucode.com/blog/2015/02/20/workaround-for-ctr-mode-needs-counter-paramete
                 return orig_new(key, *ls)
 
             Crypto.Cipher.AES.new = fixed_AES_new
-            client.connect(self.host,
-                           port=self.port,
-                           username=self.username,
-                           timeout=self.timeout, )
+            client.connect(
+                self.host,
+                port=self.port,
+                username=self.username,
+                timeout=self.timeout, )
         return client
 
     def execute(self, cmd):
@@ -107,19 +109,17 @@ http://uucode.com/blog/2015/02/20/workaround-for-ctr-mode-needs-counter-paramete
         return self.execute("mkdir -p %s" % path)
 
     def send_file(self, local_path, remote_path):
-        logger.info("Sending [{local}] to {host}:[{remote}]".format(
-            local=local_path,
-            host=self.host,
-            remote=remote_path))
+        logger.info(
+            "Sending [{local}] to {host}:[{remote}]".format(
+                local=local_path, host=self.host, remote=remote_path))
         with self.connect() as client, client.open_sftp() as sftp:
             result = sftp.put(local_path, remote_path)
         return result
 
     def get_file(self, remote_path, local_path):
-        logger.info("Receiving from {host}:[{remote}] to [{local}]".format(
-            local=local_path,
-            host=self.host,
-            remote=remote_path))
+        logger.info(
+            "Receiving from {host}:[{remote}] to [{local}]".format(
+                local=local_path, host=self.host, remote=remote_path))
         with self.connect() as client, client.open_sftp() as sftp:
            result = sftp.get(remote_path, local_path)
         return result
@@ -129,39 +129,27 @@ http://uucode.com/blog/2015/02/20/workaround-for-ctr-mode-needs-counter-paramete
 def check_ssh_connection():
-    logging.basicConfig(level=logging.DEBUG,
-                        format='%(asctime)s %(levelname)s %(message)s')
+    logging.basicConfig(
+        level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
     logging.getLogger("paramiko.transport").setLevel(logging.DEBUG)
 
     parser = argparse.ArgumentParser(
         description='Test SSH connection for monitoring.')
     parser.add_argument(
-        '-e', '--endpoint',
-        default='example.org',
-        help='which host to try')
+        '-e', '--endpoint', default='example.org', help='which host to try')
     parser.add_argument(
-        '-u', '--username',
-        default=os.getlogin(),
-        help='SSH username')
-    parser.add_argument(
-        '-p', '--port',
-        default=22,
-        type=int,
-        help='SSH port')
+        '-u', '--username', default=os.getlogin(), help='SSH username')
+    parser.add_argument('-p', '--port', default=22, type=int, help='SSH port')
     args = parser.parse_args()
 
     logging.info(
-        "Checking SSH to %s@%s:%d",
-        args.username,
-        args.endpoint,
-        args.port)
+        "Checking SSH to %s@%s:%d", args.username, args.endpoint, args.port)
 
     ssh = SecuredShell(args.endpoint, args.port, args.username, 10)
     print(ssh.execute("ls -l"))
 
 
 class AsyncSession(object):
-
     def __init__(self, ssh, cmd):
         self.client = ssh.connect()
         self.session = self.client.get_transport().open_session()
@@ -427,8 +415,8 @@ def expand_time(str_time, default_unit='s', multiplier=1):
             result += value * 60 * 60 * 24 * 7
             continue
         else:
-            raise ValueError("String contains unsupported unit %s: %s" %
-                             (unit, str_time))
+            raise ValueError(
+                "String contains unsupported unit %s: %s" % (unit, str_time))
     return int(result * multiplier)
@@ -460,11 +448,12 @@ def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
         cmd = shlex.split(cmd)
 
     if catch_out:
-        process = subprocess.Popen(cmd,
-                                   shell=shell,
-                                   stderr=subprocess.PIPE,
-                                   stdout=subprocess.PIPE,
-                                   close_fds=True)
+        process = subprocess.Popen(
+            cmd,
+            shell=shell,
+            stderr=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            close_fds=True)
     else:
         process = subprocess.Popen(cmd, shell=shell, close_fds=True)
@@ -503,14 +492,12 @@ def pairs(lst):
 def update_status(status, multi_key, value):
     if len(multi_key) > 1:
-        update_status(
-            status.setdefault(multi_key[0], {}), multi_key[1:], value)
+        update_status(status.setdefault(multi_key[0], {}), multi_key[1:], value)
     else:
         status[multi_key[0]] = value
 
 
 class AddressWizard:
-
     def __init__(self):
         self.lookup_fn = socket.getaddrinfo
         self.socket_class = socket.socket
@@ -554,8 +541,9 @@ class AddressWizard:
             resolved = self.lookup_fn(address_str, port)
             logger.debug("Lookup result: %s", resolved)
         except Exception as exc:
-            logger.debug("Exception trying to resolve hostname %s : %s",
-                         address_str, traceback.format_exc(exc))
+            logger.debug(
+                "Exception trying to resolve hostname %s : %s", address_str,
+                traceback.format_exc(exc))
             msg = "Failed to resolve hostname: %s. Error: %s"
             raise RuntimeError(msg % (address_str, exc))
@@ -565,7 +553,8 @@ class AddressWizard:
         if explicit_port:
             logger.warn(
-                "Using phantom.port option is deprecated. Use phantom.address=[address]:port instead")
+                "Using phantom.port option is deprecated. Use phantom.address=[address]:port instead"
+            )
             port = int(explicit_port)
         elif not port:
             port = 80
@@ -574,8 +563,9 @@ class AddressWizard:
                 try:
                     self.__test(family, (parsed_ip, port))
                 except RuntimeError as exc:
-                    logger.warn("Failed TCP connection test using [%s]:%s",
-                                parsed_ip, port)
+                    logger.warn(
+                        "Failed TCP connection test using [%s]:%s", parsed_ip,
+                        port)
                     continue
             return is_v6, parsed_ip, int(port), address_str
@@ -589,8 +579,9 @@ class AddressWizard:
             test_sock.settimeout(5)
             test_sock.connect(sa)
         except Exception as exc:
-            logger.debug("Exception on connect attempt [%s]:%s : %s", sa[0],
-                         sa[1], traceback.format_exc(exc))
+            logger.debug(
+                "Exception on connect attempt [%s]:%s : %s", sa[0], sa[1],
+                traceback.format_exc(exc))
             msg = "TCP Connection test failed for [%s]:%s, use phantom.connection_test=0 to disable it"
             raise RuntimeError(msg % (sa[0], sa[1]))
         finally:
@@ -598,7 +589,6 @@ class AddressWizard:
 class Chopper(object):
-
     def __init__(self, source):
         self.source = source

View File

@@ -12,34 +12,40 @@ def main():
         '-c',
         '--config',
         action='append',
-        help="Path to INI file containing run options, multiple options accepted")
-    parser.add_option('-f',
-                      '--fail-lock',
-                      action='store_true',
-                      dest='lock_fail',
-                      help="Don't wait for lock to release, fail test instead")
+        help="Path to INI file containing run options, multiple options accepted"
+    )
+    parser.add_option(
+        '-f',
+        '--fail-lock',
+        action='store_true',
+        dest='lock_fail',
+        help="Don't wait for lock to release, fail test instead")
     parser.add_option(
         '-i',
         '--ignore-lock',
         action='store_true',
         dest='ignore_lock',
-        help="Ignore lock files from concurrent instances, has precedence before --lock-fail")
-    parser.add_option('-k',
-                      '--lock-dir',
-                      action='store',
-                      dest='lock_dir',
-                      type="string",
-                      help="Directory for lock file")
-    parser.add_option('-l',
-                      '--log',
-                      action='store',
-                      default="tank.log",
-                      help="Tank log file location")
-    parser.add_option('-m',
-                      '--manual-start',
-                      action='store_true',
-                      dest='manual_start',
-                      help="Wait for Enter key to start the test")
+        help="Ignore lock files from concurrent instances, has precedence before --lock-fail"
+    )
+    parser.add_option(
+        '-k',
+        '--lock-dir',
+        action='store',
+        dest='lock_dir',
+        type="string",
+        help="Directory for lock file")
+    parser.add_option(
+        '-l',
+        '--log',
+        action='store',
+        default="tank.log",
+        help="Tank log file location")
+    parser.add_option(
+        '-m',
+        '--manual-start',
+        action='store_true',
+        dest='manual_start',
+        help="Wait for Enter key to start the test")
     parser.add_option(
         '-n',
         '--no-rc',
@@ -50,21 +56,25 @@ def main():
         '-o',
         '--option',
         action='append',
-        help="Set config option, multiple options accepted, example: -o 'shellexec.start=pwd'")
-    parser.add_option('-q',
-                      '--quiet',
-                      action='store_true',
-                      help="Less console output, only errors and warnings")
+        help="Set config option, multiple options accepted, example: -o 'shellexec.start=pwd'"
+    )
+    parser.add_option(
+        '-q',
+        '--quiet',
+        action='store_true',
+        help="Less console output, only errors and warnings")
     parser.add_option(
         '-s',
         '--scheduled-start',
         action='store',
         dest='scheduled_start',
-        help="Start test at specified time, format 'YYYY-MM-DD hh:mm:ss', date part is optional")
-    parser.add_option('-v',
-                      '--verbose',
-                      action='store_true',
-                      help="More console output, +debug messages")
+        help="Start test at specified time, format 'YYYY-MM-DD hh:mm:ss', date part is optional"
+    )
+    parser.add_option(
+        '-v',
+        '--verbose',
+        action='store_true',
+        help="More console output, +debug messages")
 
     completion_helper = CompletionHelperOptionParser()
     completion_helper.handle_request(parser)

View File

@@ -37,9 +37,11 @@ class RealConsoleMarkup(object):
     def clean_markup(self, orig_str):
         ''' clean markup from string '''
-        for val in [self.YELLOW, self.RED, self.RESET, self.CYAN,
-                    self.BG_MAGENTA, self.WHITE, self.BG_GREEN, self.GREEN,
-                    self.BG_BROWN, self.RED_DARK, self.MAGENTA, self.BG_CYAN]:
+        for val in [
+                self.YELLOW, self.RED, self.RESET, self.CYAN, self.BG_MAGENTA,
+                self.WHITE, self.BG_GREEN, self.GREEN, self.BG_BROWN,
+                self.RED_DARK, self.MAGENTA, self.BG_CYAN
+        ]:
             orig_str = orig_str.replace(val, '')
         return orig_str
@@ -101,8 +103,10 @@ class ConsoleTank:
         if self.log_filename:
             file_handler = logging.FileHandler(self.log_filename)
             file_handler.setLevel(logging.DEBUG)
-            file_handler.setFormatter(logging.Formatter(
-                "%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"))
+            file_handler.setFormatter(
+                logging.Formatter(
+                    "%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
+                ))
             logger.addHandler(file_handler)
 
         # create console handler with a higher log level
@@ -110,7 +114,8 @@ class ConsoleTank:
         stderr_hdl = logging.StreamHandler(sys.stderr)
 
         fmt_verbose = logging.Formatter(
-            "%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s")
+            "%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
+        )
         fmt_regular = logging.Formatter(
             "%(asctime)s [%(levelname)s] %(message)s", "%H:%M:%S")
@@ -155,12 +160,13 @@ class ConsoleTank:
             for filename in conf_files:
                 if fnmatch.fnmatch(filename, '*.ini'):
                     configs += [
-                        os.path.realpath(self.baseconfigs_location + os.sep +
-                                         filename)
+                        os.path.realpath(
+                            self.baseconfigs_location + os.sep + filename)
                     ]
         except OSError:
-            self.log.warn(self.baseconfigs_location +
-                          ' is not accessible to get configs list')
+            self.log.warn(
+                self.baseconfigs_location +
+                ' is not accessible to get configs list')
         configs += [os.path.expanduser('~/.yandex-tank')]
         return configs
@@ -172,8 +178,8 @@ class ConsoleTank:
                 "Lock files ignored. This is highly unrecommended practice!")
 
         if self.options.lock_dir:
-            self.core.set_option(self.core.SECTION, "lock_dir",
-                                 self.options.lock_dir)
+            self.core.set_option(
+                self.core.SECTION, "lock_dir", self.options.lock_dir)
 
         while True:
             try:
@@ -202,7 +208,8 @@ class ConsoleTank:
         elif os.path.exists(os.path.realpath('load.conf')):
             # just for old 'lunapark' compatibility
             self.log.warn(
-                "Using 'load.conf' is unrecommended, please use 'load.ini' instead")
+                "Using 'load.conf' is unrecommended, please use 'load.ini' instead"
+            )
             conf_file = os.path.realpath('load.conf')
             configs += [conf_file]
             self.core.add_artifact_file(conf_file, True)
@@ -260,11 +267,12 @@ class ConsoleTank:
             self.core.plugins_configure()
             self.core.plugins_prepare_test()
             if self.scheduled_start:
-                self.log.info("Waiting scheduled time: %s...",
-                              self.scheduled_start)
+                self.log.info(
+                    "Waiting scheduled time: %s...", self.scheduled_start)
                 while datetime.datetime.now() < self.scheduled_start:
-                    self.log.debug("Not yet: %s < %s", datetime.datetime.now(),
-                                   self.scheduled_start)
+                    self.log.debug(
+                        "Not yet: %s < %s",
+                        datetime.datetime.now(), self.scheduled_start)
                     time.sleep(1)
                 self.log.info("Time has come: %s", datetime.datetime.now())
@@ -283,13 +291,14 @@ class ConsoleTank:
             sys.stdout.write(RealConsoleMarkup.RESET)
             sys.stdout.write(RealConsoleMarkup.TOTAL_RESET)
             self.signal_count += 1
-            self.log.debug("Caught KeyboardInterrupt: %s",
-                           traceback.format_exc(ex))
+            self.log.debug(
+                "Caught KeyboardInterrupt: %s", traceback.format_exc(ex))
             try:
                 retcode = self.__graceful_shutdown()
             except KeyboardInterrupt as ex:
-                self.log.debug("Caught KeyboardInterrupt again: %s",
-                               traceback.format_exc(ex))
+                self.log.debug(
+                    "Caught KeyboardInterrupt again: %s",
+                    traceback.format_exc(ex))
                 self.log.info(
                     "User insists on exiting, aborting graceful shutdown...")
                 retcode = 1
@@ -310,7 +319,6 @@ class ConsoleTank:
 class DevNullOpts:
-
     def __init__(self):
         pass
@@ -318,21 +326,23 @@ class DevNullOpts:
 class CompletionHelperOptionParser(OptionParser):
     def __init__(self):
         OptionParser.__init__(self, add_help_option=False)
-        self.add_option('--bash-switches-list',
-                        action='store_true',
-                        dest="list_switches",
-                        help="Options list")
-        self.add_option('--bash-options-prev',
-                        action='store',
-                        dest="list_options_prev",
-                        help="Options list")
-        self.add_option('--bash-options-cur',
-                        action='store',
-                        dest="list_options_cur",
-                        help="Options list")
+        self.add_option(
+            '--bash-switches-list',
+            action='store_true',
+            dest="list_switches",
+            help="Options list")
+        self.add_option(
+            '--bash-options-prev',
+            action='store',
+            dest="list_options_prev",
+            help="Options list")
+        self.add_option(
+            '--bash-options-cur',
+            action='store',
+            dest="list_options_cur",
+            help="Options list")
 
     def error(self, msg):
         pass
@@ -362,8 +372,9 @@ class CompletionHelperOptionParser(OptionParser):
         plugin_keys = cmdtank.core.config.get_options(
             cmdtank.core.SECTION, cmdtank.core.PLUGIN_PREFIX)
         for (plugin_name, plugin_path) in plugin_keys:
-            opts.append(cmdtank.core.SECTION + '.' +
-                        cmdtank.core.PLUGIN_PREFIX + plugin_name + '=')
+            opts.append(
+                cmdtank.core.SECTION + '.' + cmdtank.core.PLUGIN_PREFIX +
+                plugin_name + '=')
 
         for plugin in cmdtank.core.plugins:
             for option in plugin.get_available_options():

View File

@@ -52,11 +52,10 @@ class Var(object):
 class Int(Var):
-
     def __init__(self, value=0):
         if not isinstance(value, int):
-            raise ValueError("Value should be an integer, but it is '%s'" %
-                             type(value))
+            raise ValueError(
+                "Value should be an integer, but it is '%s'" % type(value))
         super(Int, self).__init__(value)
 
     def inc(self, delta=1):
@@ -76,8 +75,9 @@ class Metric(object):
         if timestamp is None:
             timestamp = int(time.time())
         elif not isinstance(timestamp, int):
-            raise ValueError("Timestamp should be an integer, but it is '%s'" %
-                             type(timestamp))
+            raise ValueError(
+                "Timestamp should be an integer, but it is '%s'" %
+                type(timestamp))
         self.metric.put((timestamp, value))
 
     def next(self):

View File

@@ -30,12 +30,10 @@ if sys.version_info[0] < 3:
 else:
     import configparser as ConfigParser
 
-
 logger = logging.getLogger(__name__)
 
 
 class Job(object):
-
     def __init__(
             self,
             name,
@@ -114,8 +112,10 @@ class TankCore(object):
         return self.uuid
 
     def get_available_options(self):
-        return ["artifacts_base_dir", "artifacts_dir", "flush_config_to",
-                "taskset_path", "affinity"]
+        return [
+            "artifacts_base_dir", "artifacts_dir", "flush_config_to",
+            "taskset_path", "affinity"
+        ]
 
     def load_configs(self, configs):
         """ Tells core to load configs set into options storage """
@@ -129,8 +129,8 @@ class TankCore(object):
         self.config.flush()
         self.add_artifact_file(self.config.file)
         self.set_option(self.SECTION, self.PID_OPTION, str(os.getpid()))
-        self.flush_config_to = self.get_option(self.SECTION, "flush_config_to",
-                                               "")
+        self.flush_config_to = self.get_option(
+            self.SECTION, "flush_config_to", "")
         if self.flush_config_to:
             self.config.flush(self.flush_config_to)
@@ -148,29 +148,26 @@ class TankCore(object):
         self.artifacts_dir_name = self.get_option(
             self.SECTION, "artifacts_dir", "")
-        self.taskset_path = self.get_option(self.SECTION, 'taskset_path',
-                                            'taskset')
+        self.taskset_path = self.get_option(
+            self.SECTION, 'taskset_path', 'taskset')
         self.taskset_affinity = self.get_option(self.SECTION, 'affinity', '')
 
         options = self.config.get_options(self.SECTION, self.PLUGIN_PREFIX)
         for (plugin_name, plugin_path) in options:
             if not plugin_path:
-                logger.debug("Seems the plugin '%s' was disabled",
-                             plugin_name)
+                logger.debug("Seems the plugin '%s' was disabled", plugin_name)
                 continue
-            logger.debug("Loading plugin %s from %s", plugin_name,
-                         plugin_path)
+            logger.debug("Loading plugin %s from %s", plugin_name, plugin_path)
             # FIXME cleanup an old deprecated plugin path format
             if '/' in plugin_path:
-                logger.warning("Deprecated plugin path format: %s\n"
-                               "Should be in pythonic format. Example:\n"
-                               " plugin_jmeter=yandextank.plugins.JMeter",
-                               plugin_path)
+                logger.warning(
+                    "Deprecated plugin path format: %s\n"
+                    "Should be in pythonic format. Example:\n"
+                    " plugin_jmeter=yandextank.plugins.JMeter", plugin_path)
                 if plugin_path.startswith("Tank/Plugins/"):
                     plugin_path = "yandextank.plugins." + \
                         plugin_path.split('/')[-1].split('.')[0]
-                    logger.warning("Converted plugin path to %s",
-                                   plugin_path)
+                    logger.warning("Converted plugin path to %s", plugin_path)
                 else:
                     raise ValueError(
                         "Couldn't convert plugin path to new format:\n %s" %
@@ -183,7 +180,8 @@ class TankCore(object):
                     "Deprecated plugin path format: %s\n"
                     "Tank plugins are now orginized using"
                     " namespace packages. Example:\n"
-                    " plugin_jmeter=yandextank.plugins.JMeter", plugin_path)
+                    " plugin_jmeter=yandextank.plugins.JMeter",
+                    plugin_path)
                 plugin_path = plugin_path.replace(
                     "yatank_internal_", "yandextank.plugins.")
 
             if plugin_path.startswith("yatank_"):
@@ -191,7 +189,8 @@ class TankCore(object):
                     "Deprecated plugin path format: %s\n"
                     "Tank plugins are now orginized using"
                     " namespace packages. Example:\n"
-                    " plugin_jmeter=yandextank.plugins.JMeter", plugin_path)
+                    " plugin_jmeter=yandextank.plugins.JMeter",
+                    plugin_path)
                 plugin_path = plugin_path.replace(
                     "yatank_", "yandextank.plugins.")
@@ -201,9 +200,10 @@ class TankCore(object):
                 instance = getattr(plugin, 'Plugin')(self)
             except:
                 logger.warning(
-                    "Deprecated plugin classname: %s. Should be 'Plugin'", plugin)
-                instance = getattr(plugin, plugin_path.split(
-                    '.')[-1] + 'Plugin')(self)
+                    "Deprecated plugin classname: %s. Should be 'Plugin'",
+                    plugin)
+                instance = getattr(
+                    plugin, plugin_path.split('.')[-1] + 'Plugin')(self)
 
             self.plugins.append(instance)
@@ -247,26 +247,16 @@ class TankCore(object):
             gen = None
 
         self.job = Job(
-            name=self.get_option(
-                self.SECTION_META,
-                "job_name",
-                'none').decode('utf8'),
-            description=self.get_option(
-                self.SECTION_META,
-                "job_dsc",
-                '').decode('utf8'),
-            task=self.get_option(
-                self.SECTION_META,
-                'task',
-                'dir').decode('utf8'),
-            version=self.get_option(
-                self.SECTION_META,
-                'ver',
-                '').decode('utf8'),
+            name=self.get_option(self.SECTION_META, "job_name",
+                                 'none').decode('utf8'),
+            description=self.get_option(self.SECTION_META, "job_dsc",
+                                        '').decode('utf8'),
+            task=self.get_option(self.SECTION_META, 'task',
+                                 'dir').decode('utf8'),
+            version=self.get_option(self.SECTION_META, 'ver',
+                                    '').decode('utf8'),
             config_copy=self.get_option(
-                self.SECTION_META,
-                'copy_config_to',
-                'config_copy'),
+                self.SECTION_META, 'copy_config_to', 'config_copy'),
             monitoring_plugin=mon,
             aggregator_plugin=aggregator,
             generator_plugin=gen,
@@ -319,9 +309,7 @@ class TankCore(object):
             end_time = time.time()
             diff = end_time - begin_time
             logger.debug("Polling took %s", diff)
-            logger.debug("Tank status:\n%s",
-                         json.dumps(self.status,
-                                    indent=2))
+            logger.debug("Tank status:\n%s", json.dumps(self.status, indent=2))
             # screen refresh every 0.5 s
             if diff < 0.5:
                 time.sleep(0.5 - diff)
@@ -340,8 +328,8 @@ class TankCore(object):
                 logger.debug("RC after: %s", retcode)
             except Exception as ex:
                 logger.error("Failed finishing plugin %s: %s", plugin, ex)
-                logger.debug("Failed finishing plugin: %s",
-                             traceback.format_exc(ex))
+                logger.debug(
+                    "Failed finishing plugin: %s", traceback.format_exc(ex))
                 if not retcode:
                     retcode = 1
@@ -363,10 +351,10 @@ class TankCore(object):
                 retcode = plugin.post_process(retcode)
                 logger.debug("RC after: %s", retcode)
             except Exception as ex:
-                logger.error("Failed post-processing plugin %s: %s", plugin,
-                             ex)
-                logger.debug("Failed post-processing plugin: %s",
-                             traceback.format_exc(ex))
+                logger.error("Failed post-processing plugin %s: %s", plugin, ex)
+                logger.debug(
+                    "Failed post-processing plugin: %s",
+                    traceback.format_exc(ex))
                 if not retcode:
                     retcode = 1
@@ -380,16 +368,15 @@ class TankCore(object):
     def taskset(self, pid, path, affinity):
         if affinity:
             args = "%s -pc %s %s" % (path, affinity, pid)
-            retcode, stdout, stderr = execute(args,
-                                              shell=True,
-                                              poll_period=0.1,
-                                              catch_out=True)
+            retcode, stdout, stderr = execute(
+                args, shell=True, poll_period=0.1, catch_out=True)
             logger.debug('taskset stdout: %s', stdout)
             if retcode != 0:
                 raise KeyError(stderr)
             else:
-                logger.info("Enabled taskset for pid %s with affinity %s",
-                            str(pid), affinity)
+                logger.info(
+                    "Enabled taskset for pid %s with affinity %s",
+                    str(pid), affinity)
 
     def __collect_artifacts(self):
         logger.debug("Collecting artifacts")
@@ -427,8 +414,8 @@ class TankCore(object):
             logger.debug("Expanding shell option %s", value)
             retcode, stdout, stderr = execute(value[1:-1], True, 0.1, True)
             if retcode or stderr:
-                raise ValueError("Error expanding option %s, RC: %s" %
-                                 (value, retcode))
+                raise ValueError(
+                    "Error expanding option %s, RC: %s" % (value, retcode))
             value = stdout.strip()
         return value
@@ -447,9 +434,10 @@ class TankCore(object):
         Retrieve a plugin of desired class, KeyError raised otherwise
         """
         logger.debug("Searching for plugin: %s", plugin_class)
-        matches = [plugin
-                   for plugin in self.plugins
-                   if isinstance(plugin, plugin_class)]
+        matches = [
+            plugin for plugin in self.plugins
+            if isinstance(plugin, plugin_class)
+        ]
         if len(matches) > 0:
             if len(matches) > 1:
                 logger.debug(
@@ -457,8 +445,7 @@ class TankCore(object):
                     plugin_class)
             return matches[-1]
         else:
-            raise KeyError("Requested plugin type not found: %s" %
-                           plugin_class)
+            raise KeyError("Requested plugin type not found: %s" % plugin_class)
 
     def __collect_file(self, filename, keep_original=False):
         """
@@ -487,28 +474,30 @@ class TankCore(object):
         Add file to be stored as result artifact on post-process phase
         """
         if filename:
-            logger.debug("Adding artifact file to collect (keep=%s): %s",
-                         keep_original, filename)
+            logger.debug(
+                "Adding artifact file to collect (keep=%s): %s", keep_original,
+                filename)
             self.artifact_files[filename] = keep_original
 
     def apply_shorthand_options(self, options, default_section='DEFAULT'):
         for option_str in options:
             try:
                 section = option_str[:option_str.index('.')]
-                option = option_str[
-                    option_str.index('.') + 1:option_str.index('=')]
+                option = option_str[option_str.index('.') + 1:option_str.index(
+                    '=')]
             except ValueError:
                 section = default_section
                 option = option_str[:option_str.index('=')]
             value = option_str[option_str.index('=') + 1:]
-            logger.debug("Override option: %s => [%s] %s=%s", option_str,
-                         section, option, value)
+            logger.debug(
+                "Override option: %s => [%s] %s=%s", option_str, section,
+                option, value)
             self.set_option(section, option, value)
 
     def get_lock_dir(self):
         if not self.lock_dir:
-            self.lock_dir = self.get_option(self.SECTION, "lock_dir",
-                                            self.LOCK_DIR)
+            self.lock_dir = self.get_option(
+                self.SECTION, "lock_dir", self.LOCK_DIR)
 
         return os.path.expanduser(self.lock_dir)
@@ -516,8 +505,8 @@ class TankCore(object):
         if not force and self.__there_is_locks():
             raise RuntimeError("There is lock files")
 
-        fh, self.lock_file = tempfile.mkstemp('.lock', 'lunapark_',
-                                              self.get_lock_dir())
+        fh, self.lock_file = tempfile.mkstemp(
+            '.lock', 'lunapark_', self.get_lock_dir())
         os.close(fh)
         os.chmod(self.lock_file, 0o644)
         self.config.file = self.lock_file
@@ -542,18 +531,19 @@ class TankCore(object):
                     info.read(full_name)
                     pid = info.get(TankCore.SECTION, self.PID_OPTION)
                     if not pid_exists(int(pid)):
-                        logger.debug("Lock PID %s not exists, ignoring and "
-                                     "trying to remove", pid)
+                        logger.debug(
+                            "Lock PID %s not exists, ignoring and "
+                            "trying to remove", pid)
                         try:
                             os.remove(full_name)
                         except Exception as exc:
-                            logger.debug("Failed to delete lock %s: %s",
-                                         full_name, exc)
+                            logger.debug(
+                                "Failed to delete lock %s: %s", full_name, exc)
                     else:
                         retcode = True
                 except Exception as exc:
-                    logger.warn("Failed to load info from lock %s: %s",
-                                full_name, exc)
+                    logger.warn(
+                        "Failed to load info from lock %s: %s", full_name, exc)
                     retcode = True
         return retcode
@@ -584,14 +574,15 @@ class TankCore(object):
                 plugin.close()
             except Exception as ex:
                 logger.error("Failed closing plugin %s: %s", plugin, ex)
-                logger.debug("Failed closing plugin: %s",
-                             traceback.format_exc(ex))
+                logger.debug(
+                    "Failed closing plugin: %s", traceback.format_exc(ex))
 
     @property
     def artifacts_dir(self):
         if not self._artifacts_dir:
             if not self.artifacts_dir_name:
-                date_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S.")
+                date_str = datetime.datetime.now().strftime(
+                    "%Y-%m-%d_%H-%M-%S.")
                 self.artifacts_dir_name = tempfile.mkdtemp(
                     "", date_str, self.artifacts_base_dir)
             elif not os.path.isdir(self.artifacts_dir_name):
@@ -621,8 +612,9 @@ class ConfigManager(object):
     def load_files(self, configs):
         """ Read configs set into storage """
         logger.debug("Reading configs: %s", configs)
-        config_filenames = [resource.resource_filename(config)
-                            for config in configs]
+        config_filenames = [
+            resource.resource_filename(config) for config in configs
+        ]
         try:
             self.config.read(config_filenames)
         except Exception as ex:
@@ -644,13 +636,13 @@ class ConfigManager(object):
         try:
             for option in self.config.options(section):
                 if not prefix or option.find(prefix) == 0:
-                    res += [(option[len(prefix):],
-                             self.config.get(section, option))]
+                    res += [(
+                        option[len(prefix):], self.config.get(section, option))]
         except ConfigParser.NoSectionError as ex:
             logger.warning("No section: %s", ex)
 
-        logger.debug("Section: [%s] prefix: '%s' options:\n%s", section,
-                     prefix, res)
+        logger.debug(
+            "Section: [%s] prefix: '%s' options:\n%s", section, prefix, res)
         return res
 
     def find_sections(self, prefix):

View File

@@ -38,8 +38,8 @@ class Worker(object):
             np.linspace(5000, 9900, 50))  # 100µs accuracy
         bins = np.append(bins,
                          np.linspace(10, 499, 490) * 1000)  # 1ms accuracy
-        bins = np.append(bins, np.linspace(500, 2995, 500) *
-                         1000)  # 5ms accuracy
+        bins = np.append(bins,
+                         np.linspace(500, 2995, 500) * 1000)  # 5ms accuracy
         bins = np.append(bins, np.linspace(3000, 9990, 700) *
                          1000)  # 10ms accuracy
         bins = np.append(bins, np.linspace(10000, 29950, 400) *
@@ -119,7 +119,6 @@ class Worker(object):
 class DataPoller(object):
-
     def __init__(self, source, poll_period):
         self.poll_period = poll_period
         self.source = source
@@ -132,7 +131,6 @@ class DataPoller(object):
 class Aggregator(object):
-
     def __init__(self, source, config, verbose_histogram):
         self.worker = Worker(config, verbose_histogram)
         self.source = source
@@ -144,12 +142,11 @@ class Aggregator(object):
             start_time = time.time()
             result = {
                 "ts": ts,
-                "tagged": {
-                    tag: self.worker.aggregate(data)
-                    for tag, data in by_tag
-                },
+                "tagged":
+                {tag: self.worker.aggregate(data)
+                 for tag, data in by_tag},
                 "overall": self.worker.aggregate(chunk),
             }
-            logger.debug("Aggregation time: %.2fms",
-                         (time.time() - start_time) * 1000)
+            logger.debug(
+                "Aggregation time: %.2fms", (time.time() - start_time) * 1000)
             yield result

View File

@@ -23,9 +23,8 @@ class TimeChopper(object):
         grouped = chunk.groupby(level=0)
         for group_key, group_data in list(grouped):
             if group_key in self.cache:
-                self.cache[group_key] = pd.concat([
-                    self.cache[group_key], group_data
-                ])
+                self.cache[group_key] = pd.concat(
+                    [self.cache[group_key], group_data])
             else:
                 self.cache[group_key] = group_data
         while len(self.cache) > self.cache_size:

View File

@@ -59,8 +59,8 @@ class Plugin(AbstractPlugin):
         return ["verbose_histogram"]
 
     def configure(self):
-        self.aggregator_config = json.loads(resource_string(
-            __name__, 'config/phout.json').decode('utf8'))
+        self.aggregator_config = json.loads(
+            resource_string(__name__, 'config/phout.json').decode('utf8'))
         verbose_histogram_option = self.get_option("verbose_histogram", "0")
         self.verbose_histogram = (
             verbose_histogram_option.lower() == "true") or (
@@ -72,16 +72,15 @@ class Plugin(AbstractPlugin):
         if self.reader and self.stats_reader:
             pipeline = Aggregator(
                 TimeChopper(
-                    DataPoller(source=self.reader,
-                               poll_period=1),
-                    cache_size=3),
+                    DataPoller(
+                        source=self.reader, poll_period=1), cache_size=3),
                 self.aggregator_config,
                 self.verbose_histogram)
             self.drain = Drain(pipeline, self.results)
             self.drain.start()
             self.stats_drain = Drain(
-                Chopper(DataPoller(source=self.stats_reader,
-                                   poll_period=1)),
+                Chopper(DataPoller(
+                    source=self.stats_reader, poll_period=1)),
                 self.stats)
             self.stats_drain.start()
         else:

View File

@@ -7,7 +7,6 @@ from conftest import MAX_TS, random_split
 class TestChopper(object):
-
     def test_one_chunk(self, data):
         chopper = TimeChopper([data], 5)
         result = list(chopper)
@@ -29,8 +28,9 @@ class TestChopper(object):
         chopper = TimeChopper(chunks, 5)
         result = list(chopper)
         assert len(
-            result) == MAX_TS, "DataFrame is splitted into proper number of chunks"
+            result
+        ) == MAX_TS, "DataFrame is splitted into proper number of chunks"
         concatinated = pd.concat(r[1] for r in result)
         assert len(data) == len(concatinated), "We did not lose anything"
-        assert np.allclose(concatinated.values,
-                           data.values), "We did not corrupt the data"
+        assert np.allclose(
+            concatinated.values, data.values), "We did not corrupt the data"

View File

@@ -9,12 +9,12 @@ from yandextank.plugins.Aggregator.aggregator import Aggregator
 from yandextank.plugins.Aggregator.chopper import TimeChopper
 from yandextank.plugins.Aggregator.plugin import DataPoller
 
-AGGR_CONFIG = json.loads(resource_string("yandextank.plugins.Aggregator",
-                                         'config/phout.json').decode('utf-8'))
+AGGR_CONFIG = json.loads(
+    resource_string("yandextank.plugins.Aggregator", 'config/phout.json')
+    .decode('utf-8'))
 
 
 class TestPipeline(object):
-
     def test_partially_reversed_data(self, data):
         results_queue = Queue()
         chunks = list(random_split(data))
@@ -22,9 +22,8 @@ class TestPipeline(object):
         pipeline = Aggregator(
             TimeChopper(
-                DataPoller(source=chunks,
-                           poll_period=0.1),
-                cache_size=3),
+                DataPoller(
+                    source=chunks, poll_period=0.1), cache_size=3),
             AGGR_CONFIG,
             False)
         drain = Drain(pipeline, results_queue)
@@ -44,9 +43,8 @@ class TestPipeline(object):
         pipeline = Aggregator(
             TimeChopper(
-                DataPoller(source=producer(),
-                           poll_period=0.1),
-                cache_size=3),
+                DataPoller(
+                    source=producer(), poll_period=0.1), cache_size=3),
             AGGR_CONFIG,
             False)
         drain = Drain(pipeline, results_queue)

View File

@@ -8,5 +8,5 @@ def test_random_split(data):
     assert len(dataframes) > 1
     concatinated = pd.concat(dataframes)
     assert len(concatinated) == len(data), "We did not lose anything"
-    assert np.allclose(concatinated.values,
-                       data.values), "We did not corrupt the data"
+    assert np.allclose(
+        concatinated.values, data.values), "We did not corrupt the data"

View File

@@ -44,10 +44,11 @@ class Plugin(AbstractPlugin):
         process_stdout_file = self.core.mkstemp(".log", "appium_stdout_")
         self.core.add_artifact_file(process_stdout_file)
         self.process_stdout = open(process_stdout_file, 'w')
-        self.process = subprocess.Popen(args,
-                                        stderr=self.process_stdout,
-                                        stdout=self.process_stdout,
-                                        close_fds=True)
+        self.process = subprocess.Popen(
+            args,
+            stderr=self.process_stdout,
+            stdout=self.process_stdout,
+            close_fds=True)
         logger.info("Waiting 5 seconds for Appium to start...")
         time.sleep(5)
@@ -61,8 +62,8 @@ class Plugin(AbstractPlugin):
     def end_test(self, retcode):
         if self.process and self.process.poll() is None:
-            logger.info("Terminating appium process with PID %s",
-                        self.process.pid)
+            logger.info(
+                "Terminating appium process with PID %s", self.process.pid)
             self.process.terminate()
         if self.process_stdout:
             self.process_stdout.close()
@@ -20,14 +20,13 @@ class AvgTimeCriterion(AbstractCriterion):
    def __init__(self, autostop, param_str):
        AbstractCriterion.__init__(self)
        self.seconds_count = 0
        self.rt_limit = expand_to_milliseconds(param_str.split(',')[0])
        self.seconds_limit = expand_to_seconds(param_str.split(',')[1])
        self.autostop = autostop

    def notify(self, data, stat):
        if (
                data["overall"]["interval_real"]["total"] / 1000.0 /
                data["overall"]["interval_real"]["len"]) > self.rt_limit:
            if not self.seconds_count:
                self.cause_second = (data, stat)
@@ -47,10 +46,10 @@ class AvgTimeCriterion(AbstractCriterion):
        return self.RC_TIME

    def explain(self):
        explanation = (
            "Average response time higher"
            " than %sms for %ss, since %s" %
            (self.rt_limit, self.seconds_count, self.cause_second[0]["ts"]))
        return explanation
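A sketch of how the parameter string above is consumed (the values are hypothetical; expand_to_milliseconds/expand_to_seconds are the helpers this module imports):

crit = AvgTimeCriterion(autostop, "1s500ms,30s")  # hypothetical values
# param_str.split(',')[0] -> "1s500ms" -> rt_limit = 1500 (milliseconds)
# param_str.split(',')[1] -> "30s"     -> seconds_limit = 30 (seconds)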
    def widget_explain(self):
@@ -80,8 +79,7 @@ class HTTPCodesCriterion(AbstractCriterion):
        else:
            self.level = int(level_str)
            self.is_relative = False
        self.seconds_limit = expand_to_seconds(param_str.split(',')[2])

    def notify(self, data, stat):
        matched_responses = self.count_matched_codes(
@@ -92,8 +90,9 @@ class HTTPCodesCriterion(AbstractCriterion):
                "interval_real"]["len"]
        else:
            matched_responses = 0
        logger.debug(
            "HTTP codes matching mask %s: %s/%s", self.codes_mask,
            matched_responses, self.level)

        if matched_responses >= self.level:
            if not self.seconds_count:
@@ -122,13 +121,15 @@ class HTTPCodesCriterion(AbstractCriterion):
        return level_str

    def explain(self):
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_count,
            self.cause_second[0].get('ts'))
        return "%s codes count higher than %s for %ss, since %s" % items

    def widget_explain(self):
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_count,
            self.seconds_limit)
        return "HTTP %s>%s for %s/%ss" % items, float(
            self.seconds_count) / self.seconds_limit
@@ -154,8 +155,7 @@ class NetCodesCriterion(AbstractCriterion):
        else:
            self.level = int(level_str)
            self.is_relative = False
        self.seconds_limit = expand_to_seconds(param_str.split(',')[2])

    def notify(self, data, stat):
        codes = copy.deepcopy(data["overall"]["net_code"]["count"])
@@ -168,8 +168,9 @@ class NetCodesCriterion(AbstractCriterion):
                "interval_real"]["len"]
        else:
            matched_responses = 0
        logger.debug(
            "Net codes matching mask %s: %s/%s", self.codes_mask,
            matched_responses, self.level)

        if matched_responses >= self.level:
            if not self.seconds_count:
@@ -198,13 +199,15 @@ class NetCodesCriterion(AbstractCriterion):
        return level_str

    def explain(self):
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_count,
            self.cause_second[0].get("ts"))
        return "%s net codes count higher than %s for %ss, since %s" % items

    def widget_explain(self):
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_count,
            self.seconds_limit)
        return "Net %s>%s for %s/%ss" % items, float(
            self.seconds_count) / self.seconds_limit
@@ -225,8 +228,10 @@ class QuantileCriterion(AbstractCriterion):
        self.autostop = autostop

    def notify(self, data, stat):
        quantiles = dict(
            zip(
                data["overall"]["interval_real"]["q"]["q"], data["overall"][
                    "interval_real"]["q"]["value"]))
        if self.quantile not in quantiles.keys():
            logger.warning("No quantile %s in %s", self.quantile, quantiles)
        if self.quantile in quantiles.keys() \
@@ -249,13 +254,15 @@ class QuantileCriterion(AbstractCriterion):
        return self.RC_TIME

    def explain(self):
        items = (
            self.quantile, self.rt_limit, self.seconds_count,
            self.cause_second[0].get("ts"))
        return "Percentile %s higher than %sms for %ss, since %s" % items

    def widget_explain(self):
        items = (
            self.quantile, self.rt_limit, self.seconds_count,
            self.seconds_limit)
        return "%s%% >%sms for %s/%ss" % items, float(
            self.seconds_count) / self.seconds_limit
@@ -272,13 +279,12 @@ class SteadyCumulativeQuantilesCriterion(AbstractCriterion):
        AbstractCriterion.__init__(self)
        self.seconds_count = 0
        self.quantile_hash = ""
        self.seconds_limit = expand_to_seconds(param_str.split(',')[0])
        self.autostop = autostop

    def notify(self, data, stat):
        quantiles = dict(
            zip(data["overall"]["q"]["q"], data["overall"]["q"]["values"]))
        quantile_hash = json.dumps(quantiles)
        logging.debug("Cumulative quantiles hash: %s", quantile_hash)
        if self.quantile_hash == quantile_hash:
@@ -13,7 +13,6 @@ logger = logging.getLogger(__name__)

class WindowCounter(object):
    def __init__(self, window_size):
        self.window_size = window_size
        self.value = 0.0
@@ -60,8 +59,8 @@ class TotalFracTimeCriterion(AbstractCriterion):
    def __fail_count(self, data):
        ecdf = np.cumsum(data["overall"]["interval_real"]["hist"]["data"])
        idx = np.searchsorted(
            data["overall"]["interval_real"]["hist"]["bins"], self.rt_limit)
        if idx == 0:
            return ecdf[-1]
        elif idx == len(ecdf):
@@ -73,8 +72,8 @@ class TotalFracTimeCriterion(AbstractCriterion):
        self.seconds.append((data, stat))
        self.fail_counter.push(self.__fail_count(data))
        self.total_counter.push(data["overall"]["interval_real"]["len"])
        self.total_fail_ratio = (
            self.fail_counter.value / self.total_counter.value)
        if self.total_fail_ratio >= self.fail_ratio_limit and len(
                self.fail_counter) >= self.window_size:
            self.cause_second = self.seconds[0]
@@ -88,15 +87,17 @@ class TotalFracTimeCriterion(AbstractCriterion):
        return 25

    def explain(self):
        return (
            "%.2f%% responses times higher "
            "than %sms for %ss since: %s" % (
                self.total_fail_ratio * 100, self.rt_limit / 1000,
                self.window_size, self.cause_second[0]["ts"]))

    def widget_explain(self):
        return (
            "%.2f%% times >%sms for %ss" % (
                self.total_fail_ratio * 100, self.rt_limit / 1000,
                self.window_size), self.total_fail_ratio)


class TotalHTTPCodesCriterion(AbstractCriterion):
@@ -122,8 +123,7 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
        else:
            self.level = int(level_str)
            self.is_relative = False
        self.seconds_limit = expand_to_seconds(param_str.split(',')[2])

    def notify(self, data, stat):
        matched_responses = self.count_matched_codes(
@@ -134,8 +134,9 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
                "interval_real"]["len"] * 100
        else:
            matched_responses = 1
        logger.debug(
            "HTTP codes matching mask %s: %s/%s", self.codes_mask,
            matched_responses, self.level)
        self.data.append(matched_responses)
        self.second_window.append((data, stat))
        if len(self.data) > self.seconds_limit:
@@ -144,11 +145,10 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
        queue_len = 1
        if self.is_relative:
            queue_len = len(self.data)
        if (sum(self.data) / queue_len) >= self.level\
                and len(self.data) >= self.seconds_limit:  # yapf:disable
            self.cause_second = self.second_window[0]
            logger.debug(self.explain())
            return True
        return False
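The manually split condition above survives reformatting because yapf honors pragma comments; a sketch of both spellings (illustrative names, not from this commit):

# trailing form: keeps one statement out of yapf's hands
if (sum(data) / queue_len) >= level \
        and len(data) >= seconds_limit:  # yapf:disable
    pass

# block form: protects hand-made layout between the markers
# yapf: disable
MATRIX = [[1, 0],
          [0, 1]]
# yapf: enable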
@@ -165,12 +165,15 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
    def explain(self):
        if self.is_relative:
            items = (
                self.codes_mask, self.get_level_str(), self.seconds_limit,
                self.cause_second[0]["ts"])
            return (
                "%s codes count higher "
                "than %s for %ss, ended at: %s" % items)
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_limit,
            self.cause_second[0]["ts"])
        return "%s codes count higher than %s for %ss, since %s" % items

    def widget_explain(self):
@@ -204,8 +207,7 @@ class TotalNetCodesCriterion(AbstractCriterion):
        else:
            self.level = int(level_str)
            self.is_relative = False
        self.seconds_limit = expand_to_seconds(param_str.split(',')[2])

    def notify(self, data, stat):
        codes = data["overall"]["net_code"]["count"].copy()
@@ -216,14 +218,15 @@ class TotalNetCodesCriterion(AbstractCriterion):
            if data["overall"]["interval_real"]["len"]:
                matched_responses = float(matched_responses) / data["overall"][
                    "interval_real"]["len"] * 100
                logger.debug(
                    "Net codes matching mask %s: %s%%/%s", self.codes_mask,
                    round(matched_responses, 2), self.get_level_str())
            else:
                matched_responses = 1
        else:
            logger.debug(
                "Net codes matching mask %s: %s/%s", self.codes_mask,
                matched_responses, self.get_level_str())

        self.data.append(matched_responses)
        self.second_window.append((data, stat))
@@ -234,11 +237,10 @@ class TotalNetCodesCriterion(AbstractCriterion):
        queue_len = 1
        if self.is_relative:
            queue_len = len(self.data)
        if (sum(self.data) / queue_len) >= self.level\
                and len(self.data) >= self.seconds_limit:  # yapf:disable
            self.cause_second = self.second_window[0]
            logger.debug(self.explain())
            return True
        return False
@@ -255,12 +257,15 @@ class TotalNetCodesCriterion(AbstractCriterion):
    def explain(self):
        if self.is_relative:
            items = (
                self.codes_mask, self.get_level_str(), self.seconds_limit,
                self.cause_second[0]["ts"])
            return (
                "%s net codes count higher "
                "than %s for %ss, since %s" % items)
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_limit,
            self.cause_second[0]["ts"])
        return "%s net codes count higher than %s for %ss, since %s" % items

    def widget_explain(self):
@@ -294,8 +299,7 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
        else:
            self.level = int(level_str)
            self.is_relative = False
        self.seconds_limit = expand_to_seconds(param_str.split(',')[2])

    def notify(self, data, stat):
        matched_responses = self.count_matched_codes(
@@ -307,14 +311,15 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
                matched_responses = 100 - matched_responses
            else:
                matched_responses = 1
            logger.debug(
                "HTTP codes matching mask not %s: %s/%s", self.codes_mask,
                round(matched_responses, 1), self.level)
        else:
            matched_responses = (
                data["overall"]["interval_real"]["len"] - matched_responses)
            logger.debug(
                "HTTP codes matching mask not %s: %s/%s", self.codes_mask,
                matched_responses, self.level)
        self.data.append(matched_responses)
        self.second_window.append((data, stat))
        if len(self.data) > self.seconds_limit:
@@ -324,11 +329,10 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
        queue_len = 1
        if self.is_relative:
            queue_len = len(self.data)
        if (sum(self.data) / queue_len) >= self.level\
                and len(self.data) >= self.seconds_limit:  # yapf:disable
            self.cause_second = self.second_window[0]
            logger.debug(self.explain())
            return True
        return False
@@ -345,12 +349,15 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
    def explain(self):
        if self.is_relative:
            items = (
                self.codes_mask, self.get_level_str(), self.seconds_limit,
                self.cause_second[0]["ts"])
            return (
                "Not %s codes count higher "
                "than %s for %ss, since %s" % items)
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_limit,
            self.cause_second[0]["ts"])
        return "Not %s codes count higher than %s for %ss, since %s" % items

    def widget_explain(self):
@@ -384,8 +391,7 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
        else:
            self.level = int(level_str)
            self.is_relative = False
        self.seconds_limit = expand_to_seconds(param_str.split(',')[2])

    def notify(self, data, stat):
        codes = data["overall"]["net_code"]["count"].copy()
@@ -399,14 +405,15 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
                matched_responses = 100 - matched_responses
            else:
                matched_responses = 1
            logger.debug(
                "Net codes matching mask not %s: %s/%s", self.codes_mask,
                round(matched_responses, 1), self.level)
        else:
            matched_responses = (
                data["overall"]["interval_real"]["len"] - matched_responses)
            logger.debug(
                "Net codes matching mask not %s: %s/%s", self.codes_mask,
                matched_responses, self.level)
        self.data.append(matched_responses)
        self.second_window.append((data, stat))
        if len(self.data) > self.seconds_limit:
@@ -416,8 +423,8 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
        queue_len = 1
        if self.is_relative:
            queue_len = len(self.data)
        if (sum(self.data) / queue_len) >= self.level \
                and len(self.data) >= self.seconds_limit:  # yapf:disable
            self.cause_second = self.second_window[0]
            logger.debug(self.explain())
            return True
@@ -436,12 +443,15 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
    def explain(self):
        if self.is_relative:
            items = (
                self.codes_mask, self.get_level_str(), self.seconds_limit,
                self.cause_second[0]["ts"])
            return (
                "Not %s codes count higher "
                "than %s for %ss, since %s" % items)
        items = (
            self.codes_mask, self.get_level_str(), self.seconds_limit,
            self.cause_second[0]["ts"])
        return "Not %s codes count higher than %s for %ss, since %s" % items

    def widget_explain(self):
@@ -471,8 +481,7 @@ class TotalHTTPTrendCriterion(AbstractCriterion):
        self.tangents.append(0)
        self.last = 0
        self.seconds_limit = expand_to_seconds(param_str.split(',')[1])
        self.measurement_error = float()

    def notify(self, data, stat):
@@ -491,8 +500,9 @@ class TotalHTTPTrendCriterion(AbstractCriterion):
        self.measurement_error = self.calc_measurement_error(self.tangents)

        self.total_tan = float(sum(self.tangents) / len(self.tangents))
        logger.debug(
            "Last trend for http codes %s: %.2f +/- %.2f", self.codes_mask,
            self.total_tan, self.measurement_error)

        if self.total_tan + self.measurement_error < 0:
            self.cause_second = self.second_window[0]
@@ -521,14 +531,17 @@ class TotalHTTPTrendCriterion(AbstractCriterion):
        return 30

    def explain(self):
        items = (
            self.codes_mask, self.total_tan, self.measurement_error,
            self.seconds_limit, self.cause_second[0]["ts"])
        return (
            "Last trend for %s http codes "
            "is %.2f +/- %.2f for %ss, since %s" % items)

    def widget_explain(self):
        items = (
            self.codes_mask, self.total_tan, self.measurement_error,
            self.seconds_limit)
        return ("HTTP(%s) trend is %.2f +/- %.2f < 0 for %ss" % items, 1.0)
@@ -543,6 +556,7 @@ class QuantileOfSaturationCriterion(AbstractCriterion):
    def __init__(self, autostop, param_str):
        AbstractCriterion.__init__(self)
        raise NotImplementedError
        # self.autostop = autostop
        # self.data = deque()
        # self.second_window = deque()
@@ -51,8 +51,8 @@ class Plugin(AbstractPlugin, AggregateResultListener):
        aggregator = self.core.get_plugin_of_type(AggregatorPlugin)
        aggregator.add_result_listener(self)
        self.criterion_str = " ".join(
            self.get_option("autostop", '').split("\n"))
        self._stop_report_path = os.path.join(
            self.core.artifacts_dir,
            self.get_option("report_file", 'autostop_report.txt'))
@@ -92,8 +92,9 @@ class Plugin(AbstractPlugin, AggregateResultListener):
    def is_test_finished(self):
        if self.cause_criterion:
            self.log.info(
                "Autostop criterion requested test stop: %s",
                self.cause_criterion.explain())
            return self.cause_criterion.get_rc()
        else:
            return -1
@@ -107,8 +108,8 @@ class Plugin(AbstractPlugin, AggregateResultListener):
        for criterion_class in self.custom_criterions:
            if criterion_class.get_type_string() == type_str:
                return criterion_class(self, parsed[1])
        raise ValueError(
            "Unsupported autostop criterion type: %s" % criterion_str)

    def on_aggregated_data(self, data, stat):
        self.counting = []
@@ -116,8 +117,7 @@ class Plugin(AbstractPlugin, AggregateResultListener):
        for criterion_text, criterion in self._criterions.iteritems():
            if criterion.notify(data, stat):
                self.log.debug(
                    "Autostop criterion requested test stop: %s", criterion)
                self.cause_criterion = criterion
                open(self._stop_report_path, 'w').write(criterion_text)
                self.core.add_artifact_file(self._stop_report_path)
@@ -22,10 +22,13 @@ class Plugin(AbstractPlugin):
        self.default_target = None
        self.device_id = None
        self.cmds = {
            "enable_full_log":
            "adb %s shell dumpsys batterystats --enable full-wake-history",
            "disable_full_log":
            "adb %s shell dumpsys batterystats --disable full-wake-history",
            "reset": "adb %s shell dumpsys batterystats --reset",
            "dump": "adb %s shell dumpsys batterystats"
        }

    def get_available_options(self):
        return ["device_id"]
@@ -57,8 +60,7 @@ class Plugin(AbstractPlugin):
        try:
            logger.debug('dumping battery stats')
            dump = subprocess.Popen(
                self.cmds['dump'], stdout=subprocess.PIPE,
                shell=True).communicate()[0]
            out = subprocess.check_output(
                self.cmds['disable_full_log'], shell=True)
@@ -40,4 +40,7 @@ def scenario_2(missile, marker, measure):
SCENARIOS module variable is used by Tank to choose the scenario to
shoot with. For each missile Tank will look up missile marker in this dict.
"""
SCENARIOS = {
    "scenario_1": scenario_1,
    "scenario_2": scenario_1,
}
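A minimal gun module sketch following that docstring (the measure() context-manager usage is an assumption, not shown in this hunk):

def scenario_1(missile, marker, measure):
    with measure("step_1"):        # assumed: times one request
        pass                       # shoot the missile here

SCENARIOS = {"scenario_1": scenario_1}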
@@ -3,7 +3,6 @@ log = logging.getLogger(__name__)

class LoadTest(object):
    def __init__(self, gun):
        self.gun = gun
@@ -16,7 +16,6 @@ requests.packages.urllib3.disable_warnings()

class AbstractGun(AbstractPlugin):
    def __init__(self, core):
        super(AbstractGun, self).__init__(core)
        self.results = None
@@ -49,8 +48,8 @@ class AbstractGun(AbstractPlugin):
            raise
        finally:
            if data_item.get("interval_real") is None:
                data_item["interval_real"] = int(
                    (time.time() - start_time) * 1e6)
            self.results.put(data_item, timeout=1)
@@ -152,8 +151,8 @@ class CustomGun(AbstractGun):
        module_name = self.get_option("module_name")
        fp, pathname, description = imp.find_module(module_name, module_path)
        try:
            self.module = imp.load_module(
                module_name, fp, pathname, description)
        finally:
            if fp:
                fp.close()
@@ -186,8 +185,8 @@ class ScenarioGun(AbstractGun):
        module_name = self.get_option("module_name")
        fp, pathname, description = imp.find_module(module_name, module_path)
        try:
            self.module = imp.load_module(
                module_name, fp, pathname, description)
        finally:
            if fp:
                fp.close()
@@ -232,8 +231,8 @@ class UltimateGun(AbstractGun):
        # it is imported to be sure Python won't be able to cache it
        #
        try:
            self.module = imp.load_module(
                "%s_%d" % (module_name, time.time()), fp, pathname, description)
        finally:
            if fp:
                fp.close()
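The "%s_%d" name above defeats sys.modules caching, so an edited gun module is re-read on every load; a standalone sketch of the same trick (the path is hypothetical):

import imp
import time

fp, pathname, description = imp.find_module("gun", ["/path/to/guns"])
try:
    # a unique module name per load means Python cannot serve a cached copy
    module = imp.load_module("gun_%d" % time.time(), fp, pathname, description)
finally:
    if fp:
        fp.close()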
@@ -62,17 +62,18 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
        if gun_type in self.gun_classes:
            self.gun = self.gun_classes[gun_type](self.core)
        else:
            raise NotImplementedError(
                'No such gun type implemented: "%s"' % gun_type)
        cached_stpd_option = self.get_option("cached_stpd", '0')
        if cached_stpd_option == '1':
            cached_stpd = True
        else:
            cached_stpd = False
        self.bfg = BFG(
            gun=self.gun,
            instances=self.stepper_wrapper.instances,
            stpd_filename=self.stepper_wrapper.stpd,
            cached_stpd=cached_stpd)
        aggregator = None
        try:
            aggregator = self.core.get_plugin_of_type(AggregatorPlugin)
@@ -14,11 +14,10 @@ def records_to_df(records):

def _expand_steps(steps):
    return list(itt.chain(* [[rps] * int(duration) for rps, duration in steps]))
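The comprehension above flattens (rps, duration) steps into a per-second schedule, e.g.:

_expand_steps([(10, 2), (20, 3)])   # -> [10, 10, 20, 20, 20]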
class BfgReader(object):
    def __init__(self, results):
        self.buffer = ""
        self.stat_buffer = ""
@@ -43,7 +42,6 @@ class BfgReader(object):

class BfgStatsReader(object):
    def __init__(self, instance_counter, steps):
        self.closed = False
        self.last_ts = 0
@@ -59,9 +57,13 @@ class BfgStatsReader(object):
            reqps = 0
            if offset >= 0 and offset < len(self.steps):
                reqps = self.steps[offset]
            yield [{
                'ts': cur_ts,
                'metrics': {
                    'instances': self.instance_counter.value,
                    'reqps': reqps
                }
            }]
            self.last_ts = cur_ts
        else:
            yield []
@@ -33,8 +33,7 @@ class BfgInfoWidget(AbstractInfoWidget):
        res += str(self.instances)

        res += "\nPlanned requests: %s for %s\nActual responses: " % (
            self.planned, datetime.timedelta(seconds=self.planned_rps_duration))
        if not self.planned == self.RPS:
            res += screen.markup.YELLOW + str(self.RPS) + screen.markup.RESET
        else:
@@ -16,13 +16,15 @@ class BFG(object):
    """
    def __init__(self, gun, instances, stpd_filename, cached_stpd=False):
        logger.info(
            """
BFG using stpd from {stpd_filename}
Instances: {instances}
Gun: {gun.__class__.__name__}
""".format(
                stpd_filename=stpd_filename,
                instances=instances,
                gun=gun, ))
        self.instances = int(instances)
        self.instance_counter = mp.Value('i')
        self.results = mp.Queue()
@@ -60,9 +62,12 @@ Gun: {gun.__class__.__name__}
        Say the workers to finish their jobs and quit.
        """
        self.quit.set()
        # yapf:disable
        while sorted([
                self.pool[i].is_alive()
                for i in xrange(len(self.pool))])[-1]:
            time.sleep(1)
        # yapf:enable
        try:
            while not self.task_queue.empty():
                self.task_queue.get(timeout=0.1)
@@ -94,19 +99,20 @@ Gun: {gun.__class__.__name__}
            else:
                continue
        workers_count = self.instances
        logger.info(
            "Feeded all data. Publishing %d killer tasks" % (workers_count))
        retry_delay = 1
        for _ in range(5):
            try:
                [
                    self.task_queue.put(None, timeout=1)
                    for _ in xrange(0, workers_count)
                ]
                break
            except Full:
                logger.debug(
                    "Couldn't post killer tasks"
                    " because queue is full. Retrying in %ss", retry_delay)
                time.sleep(retry_delay)
                retry_delay *= 2
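The None items act as poison pills, one per worker, so each worker wakes up and exits; a sketch of the consumer side under that assumption:

while True:
    task = task_queue.get()
    if task is None:        # poison pill: this worker is done
        break
    shoot(task)             # hypothetical handler for one missile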
@@ -160,8 +166,7 @@ Gun: {gun.__class__.__name__}
                logger.debug("Empty queue. Exiting process")
                return
            except Full:
                logger.warning("Couldn't put to result queue because it's full")
            except Exception:
                logger.exception("Bfg shoot exception")
@@ -40,8 +40,8 @@ class Plugin(AbstractPlugin, AggregateResultListener):
    ]

    def configure(self):
        self.info_panel_width = self.get_option(
            "info_panel_width", self.info_panel_width)
        self.short_only = int(self.get_option("short_only", '0'))
        if not int(self.get_option("disable_all_colors", '0')):
            self.console_markup = RealConsoleMarkup()
@@ -97,15 +97,17 @@ class Plugin(AbstractPlugin, AggregateResultListener):
        if self.short_only:
            overall = data.get('overall')
            quantiles = dict(
                zip(
                    overall['interval_real']['q']['q'], overall['interval_real']
                    ['q']['value']))
            info = (
                "ts:{ts}\tRPS:{rps}\tavg:{avg_rt:.2f}\t"
                "min:{min:.2f}\tmax:{q100:.2f}\tq95:{q95:.2f}\t").format(
                    ts=data.get('ts'),
                    rps=overall['interval_real']['len'],
                    avg_rt=float(overall['interval_real']['total']) /
                    overall['interval_real']['len'] / 1000.0,
                    min=overall['interval_real']['min'] / 1000.0,
                    q100=quantiles[100] / 1000,
                    q95=quantiles[95] / 1000)
@@ -120,6 +122,7 @@ class Plugin(AbstractPlugin, AggregateResultListener):
        else:
            self.screen.add_info_widget(widget)


# ======================================================
@@ -147,12 +150,15 @@ class RealConsoleMarkup(object):
    def clean_markup(self, orig_str):
        ''' clean markup from string '''
        for val in [
                self.YELLOW, self.RED, self.RESET, self.CYAN, self.BG_MAGENTA,
                self.WHITE, self.BG_GREEN, self.GREEN, self.BG_BROWN,
                self.RED_DARK, self.MAGENTA, self.BG_CYAN
        ]:
            orig_str = orig_str.replace(val, '')
        return orig_str


# ======================================================
# FIXME: 3 better way to have it?
@@ -177,4 +183,5 @@ class NoConsoleMarkup(RealConsoleMarkup):
    BG_BROWN = ''
    BG_CYAN = ''

# ======================================================
@@ -22,8 +22,8 @@ def get_terminal_size():
    Helper to get console size
    '''
    try:
        sizes = struct.unpack(
            'hh', fcntl.ioctl(file_d, termios.TIOCGWINSZ, '1234'))
    except Exception:
        sizes = default_size
    return sizes
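TIOCGWINSZ fills two shorts, rows then columns; the '1234' literal is only a 4-byte buffer for ioctl to overwrite. A standalone version:

import fcntl
import struct
import sys
import termios

rows, cols = struct.unpack(
    'hh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234'))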
@@ -85,8 +85,8 @@ class Screen(object):
        if len(right_line) > self.right_panel_width:
            right_line_plain = self.markup.clean_markup(right_line)
            if len(right_line_plain) > self.right_panel_width:
                right_line = right_line[:self.
                                        right_panel_width] + self.markup.RESET
        return right_line

    def __render_left_panel(self):
@@ -124,8 +124,8 @@ class Screen(object):
    def render_screen(self):
        ''' Main method to render screen view '''
        self.term_width, self.term_height = get_terminal_size()
        self.log.debug(
            "Terminal size: %sx%s", self.term_width, self.term_height)
        self.right_panel_width = int(
            (self.term_width - len(self.RIGHT_PANEL_SEPARATOR)) *
            (float(self.info_panel_percent) / 100)) - 1
@@ -135,14 +135,14 @@ class Screen(object):
        else:
            self.right_panel_width = 0
            self.left_panel_width = self.term_width - 1
        self.log.debug(
            "Left/right panels width: %s/%s", self.left_panel_width,
            self.right_panel_width)

        widget_output = []
        if self.right_panel_width:
            widget_output = []
            self.log.debug("There are %d info widgets" % len(self.info_widgets))
            for index, widget in sorted(
                    self.info_widgets.iteritems(),
                    key=lambda item: (item[1].get_index(), item[0])):
@@ -164,11 +164,11 @@ class Screen(object):
                left_line_plain = self.markup.clean_markup(left_line)
                if len(left_line) > self.left_panel_width:
                    if len(left_line_plain) > self.left_panel_width:
                        left_line = left_line[:self.
                                              left_panel_width] + self.markup.RESET
                left_line += (
                    ' ' * (self.left_panel_width - len(left_line_plain)))
                line += left_line
            else:
                line += ' ' * self.left_panel_width
@@ -223,6 +223,7 @@ class AbstractBlock:
        '''
        raise RuntimeError("Abstract method needs to be overridden")


# ======================================================
@@ -270,8 +271,9 @@ class CurrentTimesDistBlock(AbstractBlock):
    def add_second(self, data):
        self.current_rps = data["overall"]["interval_real"]["len"]
        self.hist = zip(
            data["overall"]["interval_real"]["hist"]["bins"],
            data["overall"]["interval_real"]["hist"]["data"], )

    def render(self):
        self.lines = []
@@ -291,6 +293,7 @@ class CurrentTimesDistBlock(AbstractBlock):
        self.width = max(self.width, len(self.lines[0]))


# ======================================================
@@ -323,8 +326,8 @@ class CurrentHTTPBlock(AbstractBlock):
        ]
        for code, count in sorted(self.times_dist.iteritems()):
            line = self.format_line(code, count)
            self.width = max(
                self.width, len(self.screen.markup.clean_markup(line)))
            self.lines.append(line)

    def format_line(self, code, count):
@@ -358,6 +361,7 @@ class CurrentHTTPBlock(AbstractBlock):
        return left_line


# ======================================================
@@ -412,10 +416,11 @@ class CurrentNetBlock(AbstractBlock):
        ]
        for code, count in sorted(self.times_dist.iteritems()):
            line = self.format_line(code, count)
            self.width = max(
                self.width, len(self.screen.markup.clean_markup(line)))
            self.lines.append(line)


# ======================================================
@@ -429,10 +434,12 @@ class CurrentQuantilesBlock(AbstractBlock):
        self.quantiles = {}

    def add_second(self, data):
        self.quantiles = {
            k: v
            for k, v in zip(
                data["overall"]["interval_real"]["q"]["q"], data["overall"][
                    "interval_real"]["q"]["value"])
        }

    def render(self):
        self.lines = []
@@ -442,10 +449,12 @@ class CurrentQuantilesBlock(AbstractBlock):
            self.lines.append(line)

        self.lines.reverse()
        self.lines = [
            self.screen.markup.WHITE + 'Current Percentiles:' +
            self.screen.markup.RESET
        ] + self.lines
        self.width = max(
            self.width, len(self.screen.markup.clean_markup(self.lines[0])))

    def __format_line(self, quan, timing):
        ''' Format line '''
@@ -455,6 +464,7 @@ class CurrentQuantilesBlock(AbstractBlock):
        left_line = tpl % data
        return left_line


# ======================================================
@@ -474,22 +484,24 @@ class AnswSizesBlock(AbstractBlock):
    def render(self):
        self.lines = [self.header]
        if self.count:
            self.lines.append(
                " Avg Request: %d bytes" % (self.sum_out / self.count))
            self.lines.append(
                " Avg Response: %d bytes" % (self.sum_in / self.count))
            self.lines.append("")
        if self.cur_count:
            self.lines.append(
                " Last Avg Request: %d bytes" %
                (self.cur_out / self.cur_count))
            self.lines.append(
                " Last Avg Response: %d bytes" %
                (self.cur_in / self.cur_count))
        else:
            self.lines.append("")
            self.lines.append("")
        for line in self.lines:
            self.width = max(
                self.width, len(self.screen.markup.clean_markup(line)))

    def add_second(self, data):
@@ -501,6 +513,7 @@ class AnswSizesBlock(AbstractBlock):
        self.sum_in += self.cur_in
        self.sum_out += self.cur_out


# ======================================================
@@ -547,28 +560,41 @@ class AvgTimesBlock(AbstractBlock):
            self.screen.markup.WHITE + self.header + self.screen.markup.RESET
        ]
        if self.last_count:
            len_all = str(
                len(
                    str(
                        max([
                            self.all_connect, self.all_latency,
                            self.all_overall, self.all_receive, self.all_send
                        ]))))
            len_last = str(
                len(
                    str(
                        max([
                            self.last_connect, self.last_latency,
                            self.last_overall, self.last_receive, self.last_send
                        ]))))
            tpl = "%" + len_all + "d / %" + len_last + "d"
            self.lines.append(
                " Overall: " + tpl % (
                    float(self.all_overall) / self.all_count, float(
                        self.last_overall) / self.last_count))
            self.lines.append(
                " Connect: " + tpl % (
                    float(self.all_connect) / self.all_count, float(
                        self.last_connect) / self.last_count))
            self.lines.append(
                " Send: " + tpl % (
                    float(self.all_send) / self.all_count, float(
                        self.last_send) / self.last_count))
            self.lines.append(
                " Latency: " + tpl % (
                    float(self.all_latency) / self.all_count, float(
                        self.last_latency) / self.last_count))
            self.lines.append(
                " Receive: " + tpl % (
                    float(self.all_receive) / self.all_count, float(
                        self.last_receive) / self.last_count))
        else:
            self.lines.append("")
            self.lines.append("")
@@ -576,8 +602,9 @@ class AvgTimesBlock(AbstractBlock):
            self.lines.append("")
            self.lines.append("")
        for line in self.lines:
            self.width = max(
                self.width, len(self.screen.markup.clean_markup(line)))


# ======================================================
@@ -610,14 +637,13 @@ class CasesBlock(AbstractBlock):
            self.screen.markup.WHITE + self.header + self.screen.markup.RESET
        ]
        total_count = sum(case[0] for case in self.cases.values())
        tpl = " %s: %" + str(len(str(total_count))) + "d %5.2f%% / avg %.1f ms"
        for name, (count, resp_time) in sorted(self.cases.iteritems()):
            line = tpl % (
                " " * (self.max_case_len - len(name)) + name, count,
                100 * float(count) / total_count, float(resp_time) / count)
            self.lines.append(line)
        for line in self.lines:
            self.width = max(
                self.width, len(self.screen.markup.clean_markup(line)))
@ -44,8 +44,10 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
return __file__ return __file__
def get_available_options(self): def get_available_options(self):
return ["jmx", "args", "jmeter_path", "buffer_size", return [
"buffered_seconds", "exclude_markers"] "jmx", "args", "jmeter_path", "buffer_size", "buffered_seconds",
"exclude_markers"
]
def configure(self): def configure(self):
self.original_jmx = self.get_option("jmx") self.original_jmx = self.get_option("jmx")
@ -57,19 +59,19 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
self.jmeter_log = self.core.mkstemp('.log', 'jmeter_') self.jmeter_log = self.core.mkstemp('.log', 'jmeter_')
self.jmeter_ver = float(self.get_option('jmeter_ver', '3.0')) self.jmeter_ver = float(self.get_option('jmeter_ver', '3.0'))
self.ext_log = self.get_option( self.ext_log = self.get_option(
'extended_log', self.get_option( 'extended_log', self.get_option('ext_log', 'none'))
'ext_log', 'none'))
if self.ext_log not in self.ext_levels: if self.ext_log not in self.ext_levels:
self.ext_log = 'none' self.ext_log = 'none'
if self.ext_log != 'none': if self.ext_log != 'none':
self.ext_log_file = self.core.mkstemp('.jtl', 'jmeter_ext_') self.ext_log_file = self.core.mkstemp('.jtl', 'jmeter_ext_')
self.core.add_artifact_file(self.ext_log_file) self.core.add_artifact_file(self.ext_log_file)
-        self.jmeter_buffer_size = int(self.get_option(
-            'buffer_size', self.get_option('buffered_seconds', '3')))
+        self.jmeter_buffer_size = int(
+            self.get_option(
+                'buffer_size', self.get_option('buffered_seconds', '3')))
         self.core.add_artifact_file(self.jmeter_log, True)
-        self.exclude_markers = set(filter(
-            (lambda marker: marker != ''), self.get_option('exclude_markers',
-                                                           []).split(' ')))
+        self.exclude_markers = set(
+            filter((lambda marker: marker != ''),
+                   self.get_option('exclude_markers', []).split(' ')))
         self.jmx = self.__add_jmeter_components(
             self.original_jmx, self.jtl_file, self._get_variables())
         self.core.add_artifact_file(self.jmx)
@@ -79,10 +81,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         self.jmeter_stderr = open(jmeter_stderr_file, 'w')

     def prepare_test(self):
-        self.args = [self.jmeter_path, "-n", "-t", self.jmx, '-j',
-                     self.jmeter_log,
-                     '-Jjmeter.save.saveservice.default_delimiter=\\t',
-                     '-Jjmeter.save.saveservice.connect_time=true']
+        self.args = [
+            self.jmeter_path, "-n", "-t", self.jmx, '-j', self.jmeter_log,
+            '-Jjmeter.save.saveservice.default_delimiter=\\t',
+            '-Jjmeter.save.saveservice.connect_time=true'
+        ]
         self.args += splitstring(self.user_args)
         aggregator = None
@@ -108,8 +111,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         aggregator.add_result_listener(widget)

     def start_test(self):
-        logger.info("Starting %s with arguments: %s", self.jmeter_path,
-                    self.args)
+        logger.info(
+            "Starting %s with arguments: %s", self.jmeter_path, self.args)
         try:
             self.jmeter_process = subprocess.Popen(
                 self.args,
@@ -117,8 +120,7 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
                 preexec_fn=os.setsid,
                 close_fds=True,
                 stdout=self.jmeter_stderr,
-                stderr=self.jmeter_stderr
-            )
+                stderr=self.jmeter_stderr)
         except OSError:
             logger.debug(
                 "Unable to start JMeter process. Args: %s, Executable: %s",
@@ -126,8 +128,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
                 self.jmeter_path,
                 exc_info=True)
             raise RuntimeError(
-                "Unable to access to JMeter executable file or it does not exist: %s" %
-                self.jmeter_path)
+                "Unable to access to JMeter executable file or it does not exist: %s"
+                % self.jmeter_path)
         self.start_time = time.time()

     def is_test_finished(self):
@@ -151,8 +153,9 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
     def end_test(self, retcode):
         if self.jmeter_process:
-            logger.info("Terminating jmeter process group with PID %s",
-                        self.jmeter_process.pid)
+            logger.info(
+                "Terminating jmeter process group with PID %s",
+                self.jmeter_process.pid)
             try:
                 os.killpg(self.jmeter_process.pid, signal.SIGTERM)
             except OSError as exc:
@@ -191,16 +194,20 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         if self.ext_log in ['errors', 'all']:
             level_map = {'errors': 'true', 'all': 'false'}
             tpl_resource = 'jmeter_writer_ext.xml'
-            tpl_args = {'jtl': self.jtl_file, 'udv': udv,
-                        'ext_log': self.ext_log_file,
-                        'ext_level': level_map[self.ext_log],
-                        'save_connect': save_connect}
+            tpl_args = {
+                'jtl': self.jtl_file,
+                'udv': udv,
+                'ext_log': self.ext_log_file,
+                'ext_level': level_map[self.ext_log],
+                'save_connect': save_connect
+            }
         else:
             tpl_resource = 'jmeter_writer.xml'
             tpl_args = {
                 'jtl': self.jtl_file,
                 'udv': udv,
-                'save_connect': save_connect}
+                'save_connect': save_connect
+            }

         tpl = resource_string(__name__, 'config/' + tpl_resource)
@@ -258,7 +265,8 @@ class JMeterInfoWidget(AbstractInfoWidget, AggregateResultListener):
         template += " Duration: %s\n"
         template += "Active Threads: %s\n"
         template += " Responses/s: %s"
-        data = (os.path.basename(self.jmeter.original_jmx), duration,
-                self.active_threads, self.RPS)
+        data = (
+            os.path.basename(self.jmeter.original_jmx), duration,
+            self.active_threads, self.RPS)
         return template % data
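The Popen/killpg pair in the hunks above is the usual recipe for stopping a generator together with every child it forked: the child is started as a session leader, so one signal reaches the whole process group. A minimal standalone sketch of that pattern, using only the standard library (the command and signal choice here are illustrative, not the plugin's):

    import os
    import signal
    import subprocess

    # setsid makes the child a process-group leader in its own session
    proc = subprocess.Popen(
        ['sleep', '60'], preexec_fn=os.setsid, close_fds=True)

    # later: signal the whole group, not just the immediate child
    os.killpg(proc.pid, signal.SIGTERM)
    proc.wait()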
@@ -59,8 +59,7 @@ def _exc_to_http(param1):
         int(param1)
     except:
         logger.error(
-            "JMeter wrote some strange data into codes column: %s",
-            param1)
+            "JMeter wrote some strange data into codes column: %s", param1)
     else:
         return int(param1)
@@ -113,19 +112,14 @@ def fix_latency(row):
 # timeStamp,elapsed,label,responseCode,success,bytes,grpThreads,allThreads,Latency
 def string_to_df(data):
     chunk = pd.read_csv(
-        StringIO(data),
-        sep='\t',
-        names=jtl_columns,
-        dtype=jtl_types)
+        StringIO(data), sep='\t', names=jtl_columns, dtype=jtl_types)
     chunk["receive_ts"] = (chunk["send_ts"] + chunk['interval_real']) / 1000.0
     chunk['receive_sec'] = chunk["receive_ts"].astype(np.int64)
     chunk['interval_real'] = chunk["interval_real"] * 1000  # convert to µs
     chunk.set_index(['receive_sec'], inplace=True)
     l = len(chunk)
-    chunk['connect_time'] = (
-        chunk['connect_time'].fillna(0) *
-        1000).astype(
-            np.int64)
+    chunk['connect_time'] = (chunk['connect_time'].fillna(0) *
+                             1000).astype(np.int64)
     chunk['latency'] = chunk['latency'] * 1000
     chunk['latency'] = chunk.apply(fix_latency, axis=1)
     chunk['send_time'] = np.zeros(l)
@@ -139,7 +133,6 @@ def string_to_df(data):

 class JMeterStatAggregator(object):
-
     def __init__(self, source):
         self.worker = agg.Worker({"allThreads": ["mean"]}, False)
         self.source = source
@@ -147,16 +140,19 @@ class JMeterStatAggregator(object):
     def __iter__(self):
         for ts, chunk in self.source:
             stats = self.worker.aggregate(chunk)
-            yield [{'ts': ts,
-                    'metrics': {'instances': stats['allThreads']['mean'],
-                                'reqps': 0}}]
+            yield [{
+                'ts': ts,
+                'metrics': {
+                    'instances': stats['allThreads']['mean'],
+                    'reqps': 0
+                }
+            }]

     def close(self):
         pass

 class JMeterReader(object):
-
     def __init__(self, filename):
         self.buffer = ""
         self.stat_buffer = ""
@@ -165,8 +161,8 @@ class JMeterReader(object):
         self.agg_finished = False
         self.closed = False
         self.stat_queue = q.Queue()
-        self.stats_reader = JMeterStatAggregator(TimeChopper(
-            self._read_stat_queue(), 3))
+        self.stats_reader = JMeterStatAggregator(
+            TimeChopper(self._read_stat_queue(), 3))

     def _read_stat_queue(self):
         while not self.closed:
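string_to_df above is the heart of the JTL import: one read_csv call over a tab-separated chunk, then receive timestamps derived from send_ts + interval_real. A rough self-contained sketch of the same steps; the column names here are an illustrative subset, since the real list and dtypes live in jtl_columns/jtl_types elsewhere in this module:

    import pandas as pd
    import numpy as np
    from StringIO import StringIO  # io.StringIO on Python 3

    # timeStamp, elapsed and Latency are milliseconds in JMeter's JTL
    raw = "1483000000000\t12\tcase1\t200\ttrue\t512\t1\t1\t5\n"
    names = ['send_ts', 'interval_real', 'tag', 'proto_code', 'success',
             'size_in', 'grpThreads', 'allThreads', 'latency']
    chunk = pd.read_csv(StringIO(raw), sep='\t', names=names)
    chunk['receive_ts'] = (chunk['send_ts'] + chunk['interval_real']) / 1000.0
    chunk['receive_sec'] = chunk['receive_ts'].astype(np.int64)
    chunk.set_index(['receive_sec'], inplace=True)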
@@ -24,20 +24,17 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
     def configure(self):
         self.monitoring_logger = self.create_file_logger(
-            'monitoring', self.get_option(
-                'monitoring_log', 'monitoring.log'))
+            'monitoring', self.get_option('monitoring_log', 'monitoring.log'))
         self.aggregator_data_logger = self.create_file_logger(
-            'aggregator_data', self.get_option('test_data_log', 'test_data.log'))
+            'aggregator_data',
+            self.get_option('test_data_log', 'test_data.log'))
         self.core.job.subscribe_plugin(self)

     def create_file_logger(self, logger_name, file_name, formatter=None):
         loggr = logging.getLogger(logger_name)
         loggr.setLevel(logging.INFO)
         handler = logging.FileHandler(
-            os.path.join(
-                self.core.artifacts_dir,
-                file_name),
-            mode='w')
+            os.path.join(self.core.artifacts_dir, file_name), mode='w')
         handler.setLevel(logging.INFO)
         if formatter:
             handler.setFormatter(formatter)
@@ -51,14 +48,19 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         @stats: stats about gun
         """
         self.aggregator_data_logger.info(
-            json.dumps({'data': data, 'stats': stats}))
+            json.dumps({
+                'data': data,
+                'stats': stats
+            }))

     def monitoring_data(self, data_list):
         if self.is_telegraf:
             self.monitoring_logger.info(json.dumps(data_list))
         else:
-            [self.monitoring_logger.info(data.strip())
-             for data in data_list if data]
+            [
+                self.monitoring_logger.info(data.strip()) for data in data_list
+                if data
+            ]

     @property
     def is_telegraf(self):
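create_file_logger above hands each data stream its own logging.Logger with a single FileHandler, so monitoring and aggregator records end up in separate artifact files instead of the main log. A minimal sketch of the same idea (names and paths are illustrative):

    import json
    import logging

    def make_file_logger(name, path):
        lgr = logging.getLogger(name)
        lgr.setLevel(logging.INFO)
        handler = logging.FileHandler(path, mode='w')
        handler.setLevel(logging.INFO)
        lgr.addHandler(handler)
        lgr.propagate = False  # keep records out of the root logger
        return lgr

    mon = make_file_logger('monitoring', 'monitoring.log')
    mon.info(json.dumps({'data': {}, 'stats': {'instances': 1}}))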
@@ -41,8 +41,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
     def configure(self):
         # plugin part
-        self.pom = resource_manager.resource_filename(self.get_option(
-            "pom", "pom.xml"))
+        self.pom = resource_manager.resource_filename(
+            self.get_option("pom", "pom.xml"))
         self.testcase = self.get_option("testcase", "")
         self.maven_args = self.get_option("mvn_args", '').split()
@@ -73,10 +73,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         process_stderr_file = self.core.mkstemp(".log", "maven_")
         self.core.add_artifact_file(process_stderr_file)
         self.process_stderr = open(process_stderr_file, 'w')
-        self.process = subprocess.Popen(args,
-                                        stderr=self.process_stderr,
-                                        stdout=self.process_stderr,
-                                        close_fds=True)
+        self.process = subprocess.Popen(
+            args,
+            stderr=self.process_stderr,
+            stdout=self.process_stderr,
+            close_fds=True)

     def is_test_finished(self):
         retcode = self.process.poll()
@@ -88,8 +89,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
     def end_test(self, retcode):
         if self.process and self.process.poll() is None:
-            logger.warn("Terminating worker process with PID %s",
-                        self.process.pid)
+            logger.warn(
+                "Terminating worker process with PID %s", self.process.pid)
             self.process.terminate()
         if self.process_stderr:
             self.process_stderr.close()
@@ -1,5 +1,4 @@
 class MavenReader(object):
-
     def close(self):
         pass
@@ -8,7 +7,6 @@ class MavenReader(object):

 class MavenStatsReader(object):
-
     def close(self):
         pass
@@ -16,7 +16,6 @@ import signal
 from threading import Thread
 from optparse import OptionParser
-
 logger = logging.getLogger(__name__)
@@ -28,8 +27,10 @@ def signal_handler(sig, frame):

 def set_sig_handler():
     uncatchable = ['SIG_DFL', 'SIGSTOP', 'SIGKILL']
-    for sig_name in [s for s in dir(signal) if (
-            s.startswith("SIG") and s not in uncatchable)]:
+    for sig_name in [
+            s for s in dir(signal)
+            if (s.startswith("SIG") and s not in uncatchable)
+    ]:
         try:
             sig_num = getattr(signal, sig_name)
             signal.signal(sig_num, signal_handler)
@@ -53,7 +54,6 @@ class AbstractMetric:

 class CpuLa(AbstractMetric):
-
     def columns(self, ):
         return ['System_la1', 'System_la5', 'System_la15']
@@ -76,9 +76,11 @@ class CpuStat(AbstractMetric):
         self.current_check = {}

     def columns(self, ):
-        columns = ['System_csw', 'System_int', 'CPU_user', 'CPU_nice',
-                   'CPU_system', 'CPU_idle', 'CPU_iowait', 'CPU_irq',
-                   'CPU_softirq', 'System_numproc', 'System_numthreads']
+        columns = [
+            'System_csw', 'System_int', 'CPU_user', 'CPU_nice', 'CPU_system',
+            'CPU_idle', 'CPU_iowait', 'CPU_irq', 'CPU_softirq',
+            'System_numproc', 'System_numthreads'
+        ]
         return columns

     def check(self, ):
@@ -90,8 +92,9 @@ class CpuStat(AbstractMetric):
             proc_stat_all = proc_stat_file.readlines()
             proc_stat_file.close()
         except Exception as exc:
-            logger.error('Error opening /proc/stat. Traceback: %s',
-                         traceback.format_exc(exc))
+            logger.error(
+                'Error opening /proc/stat. Traceback: %s',
+                traceback.format_exc(exc))
             result.append([''] * 9)
         else:
             # Parse data
@@ -105,8 +108,9 @@ class CpuStat(AbstractMetric):
                     if stat.startswith('intr '):
                         self.current_check['intr'] = float(stat.split()[1])
             except Exception as exc:
-                logger.error('Error parsing /proc/stat data. Traceback: %s',
-                             traceback.format_exc(exc))
+                logger.error(
+                    'Error parsing /proc/stat data. Traceback: %s',
+                    traceback.format_exc(exc))

         # Context switches and interrups delta
         try:
@@ -116,10 +120,10 @@ class CpuStat(AbstractMetric):
             self.prev_check['intr'] = self.current_check['intr']
             result.extend([''] * 2)
         else:
-            delta_csw = str(self.current_check['csw'] -
-                            self.prev_check['csw'])
-            delta_intr = str(self.current_check['intr'] -
-                             self.prev_check['intr'])
+            delta_csw = str(
+                self.current_check['csw'] - self.prev_check['csw'])
+            delta_intr = str(
+                self.current_check['intr'] - self.prev_check['intr'])
             self.prev_check['csw'] = self.current_check['csw']
             self.prev_check['intr'] = self.current_check['intr']
             result.append(delta_csw)
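The System_csw/System_int values above are deltas, not gauges: /proc/stat exposes counters that only grow, so each check subtracts the reading remembered from the previous pass and reports an empty string on the very first one. A compact sketch of that bookkeeping (Linux only; 'ctxt' is the kernel's own name for the context-switch counter in /proc/stat):

    prev = {}

    def csw_delta():
        with open('/proc/stat') as f:
            for line in f:
                if line.startswith('ctxt '):
                    current = float(line.split()[1])
                    break
        delta = str(current - prev['ctxt']) if 'ctxt' in prev else ''
        prev['ctxt'] = current  # remember for the next check
        return delta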
@@ -168,8 +172,9 @@ class CpuStat(AbstractMetric):
                 else:
                     pids.append(element)
         except Exception as exc:
-            logger.error('Error trying to count numprocs. Traceback: %s',
-                         traceback.format_exc(exc))
+            logger.error(
+                'Error trying to count numprocs. Traceback: %s',
+                traceback.format_exc(exc))
             result.append([''])
         else:
             result.append(str(len(pids)))
@@ -223,17 +228,15 @@ class Custom(AbstractMetric):
         for el in self.tail:
             cmnd = base64.b64decode(el.split(':')[1])
             logger.debug("Run custom check: tail -n 1 %s", cmnd)
-            output = subprocess.Popen(
-                ['tail', '-n', '1', cmnd],
-                stdout=subprocess.PIPE).communicate()[0]
+            output = subprocess.Popen(['tail', '-n', '1', cmnd],
                                      stdout=subprocess.PIPE).communicate()[0]
             res.append(self.diff_value(el, output.strip()))
         for el in self.call:
             cmnd = base64.b64decode(el.split(':')[1])
             logger.debug("Run custom check: %s", cmnd)
-            output = subprocess.Popen(cmnd,
-                                      shell=True,
-                                      stdout=subprocess.PIPE).stdout.read()
+            output = subprocess.Popen(
+                cmnd, shell=True, stdout=subprocess.PIPE).stdout.read()
             res.append(self.diff_value(el, output.strip()))
         logger.debug("Collected:\n%s", res)
         return res
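Custom metric descriptors travel as 'label:command:isdiff' with both variable parts base64-encoded, which is why check() decodes el.split(':')[1] before running anything: the encoding keeps shell pipelines full of colons and spaces from breaking the colon-separated wire format. Both ends in one illustrative sketch:

    import base64
    import subprocess

    # collector side: pack a label and a shell command into one token
    label, cmd = 'la1', "cat /proc/loadavg | awk '{print $1}'"
    stat = "%s:%s:%s" % (base64.b64encode(label), base64.b64encode(cmd), 0)

    # agent side: unpack the command and run it through the shell
    decoded = base64.b64decode(stat.split(':')[1])
    out = subprocess.Popen(
        decoded, shell=True, stdout=subprocess.PIPE).stdout.read()
    print out.strip()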
@@ -253,7 +256,6 @@ class Custom(AbstractMetric):

 class Disk(AbstractMetric):
-
     def __init__(self):
         AbstractMetric.__init__(self)
         self.read = 0
@@ -281,8 +283,10 @@ class Disk(AbstractMetric):
                     writed += int(data[9])

         if self.read or self.write:
-            result = [str(size * (read - self.read)),
-                      str(size * (writed - self.write))]
+            result = [
+                str(size * (read - self.read)),
+                str(size * (writed - self.write))
+            ]
         else:
             result = ['', '']
@@ -328,8 +332,8 @@ class Disk(AbstractMetric):
                             devs.append(dsk_name)
                             break
                     except Exception as exc:
-                        logger.info("Failed: %s",
-                                    traceback.format_exc(exc))
+                        logger.info(
+                            "Failed: %s", traceback.format_exc(exc))
         except Exception as exc:
             logger.info(
                 "Failed to get block device name via /sys/devices/: %s",
@@ -347,14 +351,16 @@ class Mem(AbstractMetric):
     def __init__(self):
         AbstractMetric.__init__(self)
         self.name = 'advanced memory usage'
-        self.vars = ('MemUsed', 'Buffers', 'Cached', 'MemFree', 'Dirty',
-                     'MemTotal')
+        self.vars = (
+            'MemUsed', 'Buffers', 'Cached', 'MemFree', 'Dirty', 'MemTotal')
         # self.open('/proc/meminfo')

     def columns(self):
-        columns = ['Memory_total', 'Memory_used', 'Memory_free',
-                   'Memory_shared', 'Memory_buff', 'Memory_cached']
+        columns = [
+            'Memory_total', 'Memory_used', 'Memory_free', 'Memory_shared',
+            'Memory_buff', 'Memory_cached'
+        ]
         logger.info("Start. Columns: %s" % columns)
         return columns
@@ -373,8 +379,10 @@ class Mem(AbstractMetric):
                 data.update({name: long(raw_value.split()[0]) / 1024.0})
             data['MemUsed'] = data['MemTotal'] - data['MemFree'] - data[
                 'Buffers'] - data['Cached']
-            result = [data['MemTotal'], data['MemUsed'], data['MemFree'],
-                      0, data['Buffers'], data['Cached']]
+            result = [
+                data['MemTotal'], data['MemUsed'], data['MemFree'], 0,
+                data['Buffers'], data['Cached']
+            ]
         except Exception as e:
             logger.error("Can't get meminfo, %s", e, exc_info=True)
             result.append([self.empty] * 9)
@@ -396,8 +404,7 @@ class NetRetrans(AbstractMetric):
         return ['Net_retransmit', ]

     def check(self, ):
-        self.fetch = lambda: int(commands.getoutput(
-            'netstat -s | grep "segments retransmited" | awk \'{print $1}\''))
+        self.fetch = lambda: int(commands.getoutput('netstat -s | grep "segments retransmited" | awk \'{print $1}\''))
         if self.retr_second is not None:
             self.retr_first = self.fetch()
             self.delta = []
@@ -415,8 +422,16 @@ class NetTcp(AbstractMetric):
     def __init__(self):
         AbstractMetric.__init__(self)
-        self.fields = ['Net_closewait', 'Net_estab', 'Net_timewait', ]
-        self.keys = ['closed', 'estab', 'timewait', ]
+        self.fields = [
+            'Net_closewait',
+            'Net_estab',
+            'Net_timewait',
+        ]
+        self.keys = [
+            'closed',
+            'estab',
+            'timewait',
+        ]

     def columns(self, ):
         return self.fields
@@ -427,6 +442,7 @@ class NetTcp(AbstractMetric):
           if note set it to 0.
         * make output ordered as "fields" list
         """
+
         def fetch():
             return commands.getoutput("ss -s | sed -ne '/^TCP:/p'")
@@ -457,7 +473,10 @@ class NetTxRx(AbstractMetric):
         self.prev_tx = 0

     def columns(self, ):
-        return ['Net_tx', 'Net_rx', ]
+        return [
+            'Net_tx',
+            'Net_rx',
+        ]

     def check(self, ):
         """
@@ -477,6 +496,7 @@ class NetTxRx(AbstractMetric):
         def position(sample):
             return lines[0].split().index(sample)
+
         rx_pos = position('RX-OK')
         tx_pos = position('TX-OK')
@@ -504,7 +524,6 @@ class NetTxRx(AbstractMetric):

 class Net(AbstractMetric):
-
     def __init__(self):
         AbstractMetric.__init__(self)
         self.recv = 0
@@ -553,6 +572,7 @@ class Net(AbstractMetric):
         logger.debug("Network recieved/sent bytes: %s", result)
         return result

+
 # ===========================
@@ -588,13 +608,14 @@ class AgentWorker(Thread):
     @staticmethod
     def popen(cmnd):
-        return subprocess.Popen(cmnd,
-                                bufsize=0,
-                                preexec_fn=os.setsid,
-                                close_fds=True,
-                                shell=True,
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE)
+        return subprocess.Popen(
+            cmnd,
+            bufsize=0,
+            preexec_fn=os.setsid,
+            close_fds=True,
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)

     def run(self):
         logger.info("Running startup commands")
@@ -628,8 +649,8 @@ class AgentWorker(Thread):
         while not self.finished:
             logger.debug('Start check')
             line = []
-            sync_time = str(self.c_start + (int(time.time()) -
-                                            self.c_local_start))
+            sync_time = str(
+                self.c_start + (int(time.time()) - self.c_local_start))
             line.extend([self.c_host, sync_time])

             # known metrics
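The sync_time expression above is the whole clock-synchronization story: the agent keeps the caller's timestamp (c_start, passed via the -t option) and its own startup time (c_local_start), then stamps every sample with caller time plus elapsed seconds rather than its possibly skewed local clock. A tiny worked sketch:

    import time

    c_start = 1483000000              # collector's clock, passed with -t
    c_local_start = int(time.time())  # agent's clock at startup

    time.sleep(2)
    sync_time = c_start + (int(time.time()) - c_local_start)
    assert sync_time == c_start + 2   # reported in the collector's timeline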
@@ -638,8 +659,8 @@ class AgentWorker(Thread):
                     continue
                 try:
                     data = self.known_metrics[metric_name].check()
-                    if len(data) != len(self.known_metrics[
-                            metric_name].columns()):
+                    if len(data) != len(
+                            self.known_metrics[metric_name].columns()):
                         raise RuntimeError(
                             "Data len not matched columns count: %s" % data)
                 except Exception as e:
@@ -659,8 +680,7 @@ class AgentWorker(Thread):
                 sys.stdout.write(row + '\n')
                 sys.stdout.flush()
             except IOError as e:
-                logger.error(
-                    "Can't send data to collector, terminating, %s", e)
+                logger.error("Can't send data to collector, terminating, %s", e)
                 self.finished = True
             self.fixed_sleep(self.c_interval)
@@ -690,7 +710,6 @@ class AgentWorker(Thread):

 class AgentConfig:
-
     def __init__(self, def_cfg_path):
         self.c_interval = 1
         self.c_host = socket.getfqdn()
@@ -717,12 +736,13 @@ class AgentConfig:
             help='Config file path, default is: ./' + def_cfg_path,
             default=def_cfg_path)
-        parser.add_option('-t',
-                          '--timestamp',
-                          dest='timestamp',
-                          type='int',
-                          help='Caller timestamp for synchronization',
-                          default=self.c_local_start)
+        parser.add_option(
+            '-t',
+            '--timestamp',
+            dest='timestamp',
+            type='int',
+            help='Caller timestamp for synchronization',
+            default=self.c_local_start)
         (options, args) = parser.parse_args()

         self.c_start = options.timestamp
@@ -813,8 +833,9 @@ if __name__ == '__main__':
         logger.debug("Join the worker thread, waiting for cleanup")
         worker.join(10)
         if worker.isAlive():
-            logger.error("Worker have not finished shutdown in "
-                         "10 seconds, going to exit anyway")
+            logger.error(
+                "Worker have not finished shutdown in "
+                "10 seconds, going to exit anyway")
             sys.exit(1)
     except KeyboardInterrupt:
         if not worker.isAlive():
@@ -78,11 +78,12 @@ class AgentClient(object):
     def start(self):
         """Start remote agent"""
         logger.debug('Start monitoring: %s', self.host)
-        self.session = self.ssh.async_session(" ".join([
-            "DEBUG=1", self.python, self.path['AGENT_REMOTE_FOLDER'] +
-            '/agent.py', '-c', self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg',
-            '-t', str(int(time.time()))
-        ]))
+        self.session = self.ssh.async_session(
+            " ".join([
+                "DEBUG=1", self.python, self.path['AGENT_REMOTE_FOLDER'] +
+                '/agent.py', '-c', self.path['AGENT_REMOTE_FOLDER'] +
+                '/agent.cfg', '-t', str(int(time.time()))
+            ]))
         return self.session

     def read_maybe(self):
@@ -103,8 +104,8 @@ class AgentClient(object):
         try:
             float(self.interval)
         except:
-            raise ValueError("Monitoring interval should be a number: '%s'" %
-                             self.interval)
+            raise ValueError(
+                "Monitoring interval should be a number: '%s'" % self.interval)

         cfg = ConfigParser.ConfigParser()
         cfg.add_section('main')
@@ -136,8 +137,8 @@ class AgentClient(object):
     def install(self, loglevel):
         """Create folder and copy agent and metrics scripts to remote host"""
-        logger.info("Installing monitoring agent at %s@%s...", self.username,
-                    self.host)
+        logger.info(
+            "Installing monitoring agent at %s@%s...", self.username, self.host)

         # create remote temp dir
         cmd = self.python + ' -c "import tempfile; print tempfile.mkdtemp();"'
@@ -145,37 +146,39 @@ class AgentClient(object):
         try:
             out, errors, err_code = self.ssh.execute(cmd)
         except:
-            logger.error("Failed to install monitoring agent to %s",
-                         self.host,
-                         exc_info=True)
+            logger.error(
+                "Failed to install monitoring agent to %s",
+                self.host,
+                exc_info=True)
             return None
         if errors:
             logging.error("[%s] error: '%s'", self.host, errors)
             return None

         if err_code:
-            logging.error("Failed to create remote dir via SSH"
-                          " at %s@%s, code %s: %s" % (self.username, self.host,
-                                                      err_code, out.strip()))
+            logging.error(
+                "Failed to create remote dir via SSH"
+                " at %s@%s, code %s: %s" %
+                (self.username, self.host, err_code, out.strip()))
             return None

         remote_dir = out.strip()
         if remote_dir:
             self.path['AGENT_REMOTE_FOLDER'] = remote_dir
-        logger.debug("Remote dir at %s:%s", self.host,
-                     self.path['AGENT_REMOTE_FOLDER'])
+        logger.debug(
+            "Remote dir at %s:%s", self.host, self.path['AGENT_REMOTE_FOLDER'])

         # Copy agent and config
         agent_config = self.create_agent_config(loglevel)
         try:
-            self.ssh.send_file(self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
-                               self.path['AGENT_REMOTE_FOLDER'] + '/agent.py')
-            self.ssh.send_file(agent_config,
-                               self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg')
+            self.ssh.send_file(
+                self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
+                self.path['AGENT_REMOTE_FOLDER'] + '/agent.py')
+            self.ssh.send_file(
+                agent_config, self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg')
         except:
-            logger.error("Failed to install agent on %s",
-                         self.host,
-                         exc_info=True)
+            logger.error(
+                "Failed to install agent on %s", self.host, exc_info=True)
             return None

         return agent_config
@@ -186,12 +189,12 @@ class AgentClient(object):
         if self.session:
             self.session.send("stop\n")
             self.session.close()
-        fhandle, log_filename = tempfile.mkstemp('.log',
-                                                 "agent_" + self.host + "_")
+        fhandle, log_filename = tempfile.mkstemp(
+            '.log', "agent_" + self.host + "_")
         os.close(fhandle)
         try:
-            self.ssh.get_file(self.path['AGENT_REMOTE_FOLDER'] + "_agent.log",
-                              log_filename)
+            self.ssh.get_file(
+                self.path['AGENT_REMOTE_FOLDER'] + "_agent.log", log_filename)
             self.ssh.rm_r(self.path['AGENT_REMOTE_FOLDER'])
         except:
             logger.error("Exception while uninstalling agent", exc_info=True)
@@ -265,9 +268,7 @@ class MonitoringCollector(object):
                 logger.debug("Got data from agent: %s", data.strip())
                 self.send_data.append(
                     self.filter_unused_data(
-                        self.filter_conf, self.filter_mask, data
-                    )
-                )
+                        self.filter_conf, self.filter_mask, data))
             logger.debug("Data after filtering: %s", self.send_data)

         if not self.first_data_received and self.send_data:
@@ -286,8 +287,10 @@ class MonitoringCollector(object):
     def send_collected_data(self):
         """sends pending data set to listeners"""
-        [listener.monitoring_data(self.send_data)
-         for listener in self.listeners]
+        [
+            listener.monitoring_data(self.send_data)
+            for listener in self.listeners
+        ]
         self.send_data = []

     def get_host_config(self, host, target_hint):
@@ -307,14 +310,18 @@ class MonitoringCollector(object):
         hostname = host.get('address').lower()
         if hostname == '[target]':
             if not target_hint:
-                raise ValueError("Can't use [target] keyword with "
-                                 "no target parameter specified")
+                raise ValueError(
+                    "Can't use [target] keyword with "
+                    "no target parameter specified")
             logger.debug("Using target hint: %s", target_hint)
             hostname = target_hint.lower()
         stats = []
         startups = []
         shutdowns = []
-        custom = {'tail': [], 'call': [], }
+        custom = {
+            'tail': [],
+            'call': [],
+        }
         metrics_count = 0
         for metric in host:
             # known metrics
@@ -337,8 +344,9 @@ class MonitoringCollector(object):
                 isdiff = metric.get('diff')
                 if not isdiff:
                     isdiff = 0
-                stat = "%s:%s:%s" % (base64.b64encode(metric.get('label')),
-                                     base64.b64encode(metric.text), isdiff)
+                stat = "%s:%s:%s" % (
+                    base64.b64encode(metric.get('label')),
+                    base64.b64encode(metric.text), isdiff)
                 stats.append('Custom:' + stat)
                 custom[metric.get('measure', 'call')].append(stat)
             elif (str(metric.tag)).lower() == 'startup':
@@ -379,7 +387,9 @@ class MonitoringCollector(object):
             'shutdowns': shutdowns,
             # XXX: should be separate?
-            'stats': {hostname: stats},
+            'stats': {
+                hostname: stats
+            },
         }

     def getconfig(self, filename, target_hint):
@@ -414,8 +424,9 @@ class MonitoringCollector(object):
             try:
                 res.append(filter_list[key])
             except IndexError:
-                logger.warn("Problems filtering data: %s with %s", mask,
-                            len(filter_list))
+                logger.warn(
+                    "Problems filtering data: %s with %s", mask,
+                    len(filter_list))
                 return None
         return ';'.join(res)
@@ -527,8 +538,7 @@ class MonitoringDataDecoder(object):
                 self.metrics[host] = []
                 for metric in data:
                     if metric.startswith("Custom:"):
-                        metric = base64.standard_b64decode(metric.split(':')[
-                            1])
+                        metric = base64.standard_b64decode(metric.split(':')[1])
                     self.metrics[host].append(metric)
                     data_dict[metric] = self.NA
                 is_initial = True
@@ -537,12 +547,13 @@ class MonitoringDataDecoder(object):
             timestamp = data.pop(0)

             if host not in self.metrics.keys():
-                raise ValueError("Host %s not in started metrics: %s" %
-                                 (host, self.metrics))
+                raise ValueError(
+                    "Host %s not in started metrics: %s" % (host, self.metrics))

             if len(self.metrics[host]) != len(data):
-                raise ValueError("Metrics len and data len differs: %s vs %s" %
-                                 (len(self.metrics[host]), len(data)))
+                raise ValueError(
+                    "Metrics len and data len differs: %s vs %s" %
+                    (len(self.metrics[host]), len(data)))

             for metric in self.metrics[host]:
                 data_dict[metric] = data.pop(0)
@@ -550,4 +561,5 @@ class MonitoringDataDecoder(object):
         logger.debug("Decoded data %s: %s", host, data_dict)
         return host, data_dict, is_initial, timestamp

+
 # FIXME: 3 synchronize times between agent and collector better
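MonitoringDataDecoder keeps, per host, the ordered metric list announced in the initial packet and then zips every later line against it, which is why the two ValueError checks above are fatal: a host that never started, or a row whose field count drifted, cannot be decoded. A rough sketch of the happy path; the ';' separator is an assumption inferred from the ';'.join in filter_unused_data above:

    started = {'web01': ['CPU_user', 'Memory_used']}

    def decode_line(line):
        fields = line.strip().split(';')
        host, timestamp, values = fields[0], fields[1], fields[2:]
        metrics = started[host]
        if len(metrics) != len(values):
            raise ValueError("Metrics len and data len differs")
        return host, timestamp, dict(zip(metrics, values))

    print decode_line('web01;1483000000;42.0;1024')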

View File

@@ -43,8 +43,8 @@ class Plugin(AbstractPlugin):
     def start_test(self):
         if self.monitoring:
             self.monitoring.load_start_time = time.time()
-            logger.debug("load_start_time = %s" %
-                         self.monitoring.load_start_time)
+            logger.debug(
+                "load_start_time = %s" % self.monitoring.load_start_time)

     def get_available_options(self):
         return ["config", "default_target", 'ssh_timeout']
@@ -67,8 +67,8 @@ class Plugin(AbstractPlugin):
             self.config = xmlfile

         if not os.path.exists(self.config):
-            raise OSError("Monitoring config file not found: %s" %
-                          self.config)
+            raise OSError(
+                "Monitoring config file not found: %s" % self.config)

         if self.config == 'none':
             self.monitoring = None
@@ -99,8 +99,8 @@ class Plugin(AbstractPlugin):
             info = phantom.get_info()
             if info:
                 self.default_target = info.address
-                logger.debug("Changed monitoring target to %s",
-                             self.default_target)
+                logger.debug(
+                    "Changed monitoring target to %s", self.default_target)
         except KeyError as ex:
             logger.debug("Phantom plugin not found: %s", ex)
@@ -192,8 +192,8 @@ class SaveMonToFile(MonitoringDataListener):
         self.store.close()

-class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener,
-                       MonitoringDataDecoder):
+class MonitoringWidget(
+        AbstractInfoWidget, MonitoringDataListener, MonitoringDataDecoder):
     """
     Screen widget
     """
@@ -252,8 +252,8 @@ class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener,
             res = "Monitoring is " + screen.markup.GREEN + \
                 "online" + screen.markup.RESET + ":\n"
             for hostname, metrics in self.data.items():
-                tm_stamp = datetime.datetime.fromtimestamp(float(self.time[
-                    hostname])).strftime('%H:%M:%S')
+                tm_stamp = datetime.datetime.fromtimestamp(
+                    float(self.time[hostname])).strftime('%H:%M:%S')
                 res += (
                     " " + screen.markup.CYAN + "%s" + screen.markup.RESET +
                     " at %s:\n") % (hostname, tm_stamp)
@@ -269,8 +269,8 @@ class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener,
         return res.strip()

-class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener,
-                              MonitoringDataDecoder):
+class AbstractMetricCriterion(
+        AbstractCriterion, MonitoringDataListener, MonitoringDataDecoder):
     """ Parent class for metric criterion """

     def __init__(self, autostop, param_str):
@@ -290,8 +290,7 @@ class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener,
         self.host = param_str.split(',')[0].strip()
         self.metric = param_str.split(',')[1].strip()
         self.value_limit = float(param_str.split(',')[2])
-        self.seconds_limit = expand_to_seconds(param_str.split(',')[
-            3])
+        self.seconds_limit = expand_to_seconds(param_str.split(',')[3])
         self.last_second = None
         self.seconds_count = 0
@@ -311,9 +310,10 @@ class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener,
                     self.metric] == self.NA:
                 data[self.metric] = 0
-            logger.debug("Compare %s %s/%s=%s to %s", self.get_type_string(),
-                         host, self.metric, data[self.metric],
-                         self.value_limit)
+            logger.debug(
+                "Compare %s %s/%s=%s to %s",
+                self.get_type_string(), host, self.metric, data[self.metric],
+                self.value_limit)
             if self.comparison_fn(float(data[self.metric]), self.value_limit):
                 if not self.seconds_count:
                     self.cause_second = self.last_second
@@ -358,8 +358,9 @@ class MetricHigherCriterion(AbstractMetricCriterion):
         return "%s/%s metric value is higher than %s for %s seconds" % items

     def widget_explain(self):
-        items = (self.host, self.metric, self.value_limit, self.seconds_count,
-                 self.seconds_limit)
+        items = (
+            self.host, self.metric, self.value_limit, self.seconds_count,
+            self.seconds_limit)
         return "%s/%s > %s for %s/%ss" % items, float(
             self.seconds_count) / self.seconds_limit
@@ -385,8 +386,9 @@ class MetricLowerCriterion(AbstractMetricCriterion):
         return "%s/%s metric value is lower than %s for %s seconds" % items

     def widget_explain(self):
-        items = (self.host, self.metric, self.value_limit, self.seconds_count,
-                 self.seconds_limit)
+        items = (
+            self.host, self.metric, self.value_limit, self.seconds_count,
+            self.seconds_limit)
         return "%s/%s < %s for %s/%ss" % items, float(
             self.seconds_count) / self.seconds_limit
@@ -10,7 +10,6 @@ logger = logging.getLogger(__name__)  # pylint: disable=C0103

 class OverloadClient(object):
-
     def __init__(self):
         self.address = None
         self.token = None
@@ -77,8 +76,9 @@ class OverloadClient(object):
     def get_task_data(self, task):
         return self.get("api/task/" + task + "/summary.json")

-    def new_job(self, task, person, tank, target_host, target_port, loadscheme,
-                detailed_time, notify_list):
+    def new_job(
+            self, task, person, tank, target_host, target_port, loadscheme,
+            detailed_time, notify_list):
         data = {
             'task': task,
             'person': person,
@@ -110,20 +110,24 @@ class OverloadClient(object):
     def get_job_summary(self, jobno):
         result = self.get(
-            'api/job/' + str(jobno) +
-            "/summary.json?api_token=" + self.api_token)
+            'api/job/' + str(jobno) + "/summary.json?api_token=" +
+            self.api_token)
         return result[0]

     def close_job(self, jobno, retcode):
-        params = {'exitcode': str(retcode), 'api_token': self.api_token, }
+        params = {
+            'exitcode': str(retcode),
+            'api_token': self.api_token,
+        }

-        result = self.get('api/job/' + str(jobno) + '/close.json?' +
-                          urllib.urlencode(params))
+        result = self.get(
+            'api/job/' + str(jobno) + '/close.json?' + urllib.urlencode(params))
         return result[0]['success']

-    def edit_job_metainfo(self, jobno, job_name, job_dsc, instances, ammo_path,
-                          loop_count, version_tested, is_regression, component,
-                          tank_type, cmdline, is_starred):
+    def edit_job_metainfo(
+            self, jobno, job_name, job_dsc, instances, ammo_path, loop_count,
+            version_tested, is_regression, component, tank_type, cmdline,
+            is_starred):
         data = {
             'name': job_name,
             'description': job_dsc,
@@ -151,11 +155,8 @@ class OverloadClient(object):
             data['description'] = comment.strip()

         response = self.post(
-            'api/job/' +
-            str(jobno) +
-            "/set_imbalance.json?api_token=" +
-            self.api_token,
-            data)
+            'api/job/' + str(jobno) + "/set_imbalance.json?api_token=" +
+            self.api_token, data)
         return response

     def second_data_to_push_item(self, data, stat, timestamp, overall, case):
@@ -191,20 +192,22 @@ class OverloadClient(object):
             }
         }

-        for q, value in zip(data["interval_real"]["q"]["q"],
-                            data["interval_real"]["q"]["value"]):
+        for q, value in zip(
+                data["interval_real"]["q"]["q"],
+                data["interval_real"]["q"]["value"]):
             api_data['trail']['q' + str(q)] = value / 1000.0

         for code, cnt in data["net_code"]["count"].iteritems():
-            api_data['net_codes'].append({'code': int(code),
-                                          'count': int(cnt)})
+            api_data['net_codes'].append({'code': int(code), 'count': int(cnt)})

         for code, cnt in data["proto_code"]["count"].iteritems():
-            api_data['http_codes'].append({'code': int(code),
-                                           'count': int(cnt)})
+            api_data['http_codes'].append({
+                'code': int(code),
+                'count': int(cnt)
+            })

-        api_data['time_intervals'] = self.convert_hist(data["interval_real"][
-            "hist"])
+        api_data['time_intervals'] = self.convert_hist(
+            data["interval_real"]["hist"])
         return api_data

     def convert_hist(self, hist):
@@ -228,11 +231,11 @@ class OverloadClient(object):
                     case_name = "__EMPTY__"
                 if (len(case_name)) > 128:
                     raise RuntimeError('tag (case) name is too long: ' + case_name)
-                push_item = self.second_data_to_push_item(case_data, stat_item, ts,
-                                                          0, case_name)
+                push_item = self.second_data_to_push_item(
+                    case_data, stat_item, ts, 0, case_name)
                 items.append(push_item)
-            overall = self.second_data_to_push_item(data_item["overall"],
-                                                    stat_item, ts, 1, '')
+            overall = self.second_data_to_push_item(
+                data_item["overall"], stat_item, ts, 1, '')
             items.append(overall)

         while True:
@@ -252,8 +255,9 @@ class OverloadClient(object):
                     "Retry in 10 sec: %s", ex)
                 time.sleep(10)  # FIXME this makes all plugins freeze
             except requests.exceptions.RequestException as ex:
-                logger.warn("Failed to push second data to API,"
-                            " retry in 10 sec: %s", ex)
+                logger.warn(
+                    "Failed to push second data to API,"
+                    " retry in 10 sec: %s", ex)
                 time.sleep(10)  # FIXME this makes all plugins freeze
             except Exception:  # pylint: disable=W0703
                 # something nasty happened, but we don't want to fail here
@@ -288,8 +292,9 @@ class OverloadClient(object):
                     ' retry in 10s: %s', ex)
                 time.sleep(10)  # FIXME this makes all plugins freeze
             except requests.exceptions.RequestException as ex:
-                logger.warning('Problems sending monitoring data,'
-                               ' retry in 10s: %s', ex)
+                logger.warning(
+                    'Problems sending monitoring data,'
+                    ' retry in 10s: %s', ex)
                 time.sleep(10)  # FIXME this makes all plugins freeze
             except Exception:  # pylint: disable=W0703
                 # something irrecoverable happened
@@ -298,13 +303,12 @@ class OverloadClient(object):
             return

     def send_console(self, jobno, console):
-        logger.debug("Sending console view [%s]: %s", len(console),
-                     console[:64])
+        logger.debug(
+            "Sending console view [%s]: %s", len(console), console[:64])
         addr = ("api/job/%s/console.txt?api_token=" % jobno) + self.api_token,
         self.post_raw(addr, {"console": console, })

     def send_config_snapshot(self, jobno, config):
         logger.debug("Sending config snapshot")
-        addr = ("api/job/%s/configinfo.txt?api_token=" %
-                jobno) + self.api_token
+        addr = ("api/job/%s/configinfo.txt?api_token=" % jobno) + self.api_token
         self.post_raw(addr, {"configinfo": config, })
@@ -58,17 +58,21 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         return __file__

     def get_available_options(self):
-        opts = ["api_address",
-                "task",
-                "job_name",
-                "job_dsc",
-                "notify",
-                "ver", ]
-        opts += ["component",
-                 "regress",
-                 "operator",
-                 "copy_config_to",
-                 "jobno_file", ]
+        opts = [
+            "api_address",
+            "task",
+            "job_name",
+            "job_dsc",
+            "notify",
+            "ver",
+        ]
+        opts += [
+            "component",
+            "regress",
+            "operator",
+            "copy_config_to",
+            "jobno_file",
+        ]
         opts += ["token_file"]
         return opts
@@ -79,20 +83,22 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
             try:
                 with open(filename, 'r') as handle:
                     data = handle.read().strip()
-                logger.info("Read authentication token from %s, "
-                            "token length is %d bytes", filename,
-                            len(str(data)))
+                logger.info(
+                    "Read authentication token from %s, "
+                    "token length is %d bytes", filename, len(str(data)))
             except IOError:
-                logger.error("Failed to read Overload API token from %s",
-                             filename)
+                logger.error(
+                    "Failed to read Overload API token from %s", filename)
                 logger.info(
-                    "Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter")
+                    "Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter"
+                )
                 raise RuntimeError("API token error")
             return data
         else:
             logger.error("Overload API token filename is not defined")
             logger.info(
-                "Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter")
+                "Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter"
+            )
             raise RuntimeError("API token error")

     def configure(self):
@@ -102,17 +108,13 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         self.api_client.set_api_address(self.get_option("api_address"))
         self.api_client.set_api_timeout(self.get_option("api_timeout", 30))
         self.api_client.set_api_token(
-            self.read_token(
-                self.get_option(
-                    "token_file", "")))
+            self.read_token(self.get_option("token_file", "")))
         self.task = self.get_option("task", "DEFAULT")
         self.job_name = unicode(
-            self.get_option(
-                "job_name",
-                "none").decode("utf8"))
+            self.get_option("job_name", "none").decode("utf8"))
         if self.job_name == "ask" and sys.stdin.isatty():
-            self.job_name = unicode(raw_input(
-                "Please, enter job_name: ").decode("utf8"))
+            self.job_name = unicode(
+                raw_input("Please, enter job_name: ").decode("utf8"))
         self.job_dsc = unicode(self.get_option("job_dsc", "").decode("utf8"))
         if self.job_dsc == "ask" and sys.stdin.isatty():
             self.job_dsc = unicode(
@@ -204,8 +206,9 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         logger.info("Detected target: %s", self.target)

         self.jobno = self.api_client.new_job(
-            self.task, self.operator, socket.getfqdn(), self.target, port,
-            loadscheme, detailed_field, self.notify_list)
+            self.task, self.operator,
+            socket.getfqdn(), self.target, port, loadscheme, detailed_field,
+            self.notify_list)
         web_link = "%s%s" % (self.api_client.address, self.jobno)
         logger.info("Web link: %s", web_link)
         self.publish("jobno", self.jobno)
@@ -250,11 +253,11 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         if autostop and autostop.cause_criterion:
             rps = 0
             if autostop.cause_criterion.cause_second:
-                rps = autostop.cause_criterion.cause_second[
-                    1]["metrics"]["reqps"]
+                rps = autostop.cause_criterion.cause_second[1]["metrics"][
+                    "reqps"]
                 if not rps:
-                    rps = autostop.cause_criterion.cause_second[
-                        0]["overall"]["interval_real"]["len"]
+                    rps = autostop.cause_criterion.cause_second[0][
+                        "overall"]["interval_real"]["len"]
                 self.api_client.set_imbalance_and_dsc(
                     self.jobno, rps, autostop.cause_criterion.explain())
@@ -289,8 +292,10 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
                 self.api_client.push_monitoring_data(
                     self.jobno, json.dumps(data_list))
             elif "Monitoring" in self.core.job.monitoring_plugin.__module__:
-                [self.api_client.push_monitoring_data(
-                    self.jobno, data) for data in data_list if data]
+                [
+                    self.api_client.push_monitoring_data(self.jobno, data)
+                    for data in data_list if data
+                ]
         else:
             logger.warn("The test was stopped from Web interface")
@@ -305,8 +310,9 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
                 config_filename = mon.config
                 if config_filename and config_filename not in ['none', 'auto']:
                     with open(config_filename) as config_file:
-                        config.set(MonitoringPlugin.SECTION, "config_contents",
-                                   config_file.read())
+                        config.set(
+                            MonitoringPlugin.SECTION, "config_contents",
+                            config_file.read())
         except Exception:  # pylint: disable=W0703
             logger.debug("Can't get monitoring config", exc_info=True)
@@ -314,8 +320,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         config.write(output)
         if self.jobno:
             try:
-                self.api_client.send_config_snapshot(self.jobno,
-                                                     output.getvalue())
+                self.api_client.send_config_snapshot(
+                    self.jobno, output.getvalue())
             except Exception:  # pylint: disable=W0703
                 logger.debug("Can't send config snapshot: %s", exc_info=True)
@@ -329,11 +335,7 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         PLUGIN_DIR = os.path.join(self.core.artifacts_base_dir, self.SECTION)
         if not os.path.exists(PLUGIN_DIR):
             os.makedirs(PLUGIN_DIR)
-        os.symlink(
-            self.core.artifacts_dir,
-            os.path.join(
-                PLUGIN_DIR,
-                str(name)))
+        os.symlink(self.core.artifacts_dir, os.path.join(PLUGIN_DIR, str(name)))

     def _core_with_tank_api(self):
         """
@@ -347,13 +349,12 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         else:
             api_found = isinstance(self.core, yandex_tank_api.worker.TankCore)
         logger.debug(
-            "We are%s running under API server",
-            "" if api_found else " likely not")
+            "We are%s running under API server", ""
+            if api_found else " likely not")
         return api_found

 class JobInfoWidget(AbstractInfoWidget):
-
     def __init__(self, sender):
         AbstractInfoWidget.__init__(self)
         self.owner = sender
@@ -365,8 +366,9 @@ class JobInfoWidget(AbstractInfoWidget):
         template = "Author: " + screen.markup.RED + "%s" + \
                    screen.markup.RESET + \
                    "%s\n Job: %s %s\n Web: %s%s"
-        data = (self.owner.operator[:1], self.owner.operator[1:],
-                self.owner.jobno, self.owner.job_name,
-                self.owner.api_client.address, self.owner.jobno)
+        data = (
+            self.owner.operator[:1], self.owner.operator[1:], self.owner.jobno,
+            self.owner.job_name, self.owner.api_client.address,
+            self.owner.jobno)
         return template % data
@@ -28,7 +28,10 @@ def linear_schedule(start_rps, end_rps, period):

 def unlimited_schedule(*args):
-    return {"LimiterType": "unlimited", "Parameters": {}, }
+    return {
+        "LimiterType": "unlimited",
+        "Parameters": {},
+    }

 step_producers = {
@@ -40,8 +43,8 @@ step_producers = {
 def parse_schedule(schedule):
     steps = [
-        step.strip()
-        for step in " ".join(schedule.split("\n")).split(')') if step.strip()
+        step.strip() for step in " ".join(schedule.split("\n")).split(')')
+        if step.strip()
     ]
     if len(steps) > 1:
         raise NotImplementedError("Composite schedules not implemented yet")
@@ -50,12 +53,11 @@ def parse_schedule(schedule):
     if schedule_type in step_producers:
         return step_producers[schedule_type](*params)
     else:
-        raise NotImplementedError("Step of type %s is not implemented" %
-                                  schedule_type)
+        raise NotImplementedError(
+            "Step of type %s is not implemented" % schedule_type)

 class PandoraConfig(object):
-
     def __init__(self):
         self.pools = []
@@ -70,10 +72,9 @@ class PandoraConfig(object):

 class PoolConfig(object):
-
     def __init__(self):
-        self.config = json.loads(resource_string(
-            __name__, 'config/pandora_pool_default.json'))
+        self.config = json.loads(
+            resource_string(__name__, 'config/pandora_pool_default.json'))

     def set_ammo(self, ammo):
         self.config["AmmoProvider"]["AmmoSource"] = ammo
@@ -34,16 +34,17 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         return __file__

     def get_available_options(self):
-        opts = ["pandora_cmd", "buffered_seconds", "ammo", "loop",
-                "sample_log", "config_file", "startup_schedule",
-                "user_schedule", "gun_type"]
+        opts = [
+            "pandora_cmd", "buffered_seconds", "ammo", "loop", "sample_log",
+            "config_file", "startup_schedule", "user_schedule", "gun_type"
+        ]
         return opts

     def configure(self):
         # plugin part
         self.pandora_cmd = self.get_option("pandora_cmd", "pandora")
-        self.buffered_seconds = int(self.get_option("buffered_seconds",
-                                                    self.buffered_seconds))
+        self.buffered_seconds = int(
+            self.get_option("buffered_seconds", self.buffered_seconds))

         pool_config = PoolConfig()
@@ -94,8 +95,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         self.pandora_config_file = self.get_option("config_file", "")
         if not self.pandora_config_file:
-            self.pandora_config_file = self.core.mkstemp(".json",
-                                                         "pandora_config_")
+            self.pandora_config_file = self.core.mkstemp(
+                ".json", "pandora_config_")
         self.core.add_artifact_file(self.pandora_config_file)
         with open(self.pandora_config_file, 'w') as config_file:
             config_file.write(self.pandora_config.json())
@@ -133,10 +134,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         process_stderr_file = self.core.mkstemp(".log", "pandora_")
         self.core.add_artifact_file(process_stderr_file)
         self.process_stderr = open(process_stderr_file, 'w')
-        self.process = subprocess.Popen(args,
-                                        stderr=self.process_stderr,
-                                        stdout=self.process_stderr,
-                                        close_fds=True)
+        self.process = subprocess.Popen(
+            args,
+            stderr=self.process_stderr,
+            stdout=self.process_stderr,
+            close_fds=True)

     def is_test_finished(self):
         retcode = self.process.poll()
@@ -148,8 +150,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
     def end_test(self, retcode):
         if self.process and self.process.poll() is None:
-            logger.warn("Terminating worker process with PID %s",
-                        self.process.pid)
+            logger.warn(
+                "Terminating worker process with PID %s", self.process.pid)
             self.process.terminate()
         if self.process_stderr:
             self.process_stderr.close()
@@ -13,25 +13,31 @@ class PandoraStatsReader(object):
             pandora_response = requests.get("http://localhost:1234/debug/vars")
             pandora_stat = pandora_response.json()
-            return [{'ts': int(time.time() - 1),
-                     'metrics': {
-                         'instances':
-                         pandora_stat.get("engine_ActiveRequests"),
-                         'reqps': pandora_stat.get("engine_ReqPS"),
-                     }}]
+            return [{
+                'ts': int(time.time() - 1),
+                'metrics': {
+                    'instances': pandora_stat.get("engine_ActiveRequests"),
+                    'reqps': pandora_stat.get("engine_ReqPS"),
+                }
+            }]
         except requests.ConnectionError:
             logger.info("Pandora expvar http interface is unavailable")
         except requests.HTTPError:
-            logger.warning("Pandora expvar http interface is unavailable",
-                           exc_info=True)
+            logger.warning(
+                "Pandora expvar http interface is unavailable", exc_info=True)
         except Exception:
-            logger.warning("Couldn't decode pandora stat:\n%s\n",
-                           pandora_response.text,
-                           exc_info=True)
+            logger.warning(
+                "Couldn't decode pandora stat:\n%s\n",
+                pandora_response.text,
+                exc_info=True)

-        return [{'ts': int(time.time() - 1),
-                 'metrics': {'instances': 0,
-                             'reqps': 0}}]
+        return [{
+            'ts': int(time.time() - 1),
+            'metrics': {
+                'instances': 0,
+                'reqps': 0
+            }
+        }]

     def close(self):
         pass
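
The reader above polls Go's expvar JSON endpoint. A minimal standalone sketch of that polling step, with the URL and metric keys taken from the code above and error handling trimmed:

import time

import requests

def poll_pandora_stats(url="http://localhost:1234/debug/vars"):
    # expvar serves one flat JSON document; missing keys yield None
    stat = requests.get(url).json()
    return {
        'ts': int(time.time() - 1),
        'metrics': {
            'instances': stat.get("engine_ActiveRequests"),
            'reqps': stat.get("engine_ReqPS"),
        }
    }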

View File

@@ -57,8 +57,9 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         return __file__

     def get_available_options(self):
-        opts = ["phantom_path", "buffered_seconds", "exclude_markers",
-                "affinity"]
+        opts = [
+            "phantom_path", "buffered_seconds", "exclude_markers", "affinity"
+        ]
         opts += [PhantomConfig.OPTION_PHOUT, self.OPTION_CONFIG]
         opts += PhantomConfig.get_available_options()
         return opts
@@ -68,11 +69,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         self.config = self.get_option(self.OPTION_CONFIG, '')
         self.phantom_path = self.get_option("phantom_path", 'phantom')
         self.enum_ammo = self.get_option("enum_ammo", False)
-        self.buffered_seconds = int(self.get_option("buffered_seconds",
-                                                    self.buffered_seconds))
-        self.exclude_markers = set(filter(
-            (lambda marker: marker != ''), self.get_option('exclude_markers',
-                                                           []).split(' ')))
+        self.buffered_seconds = int(
+            self.get_option("buffered_seconds", self.buffered_seconds))
+        self.exclude_markers = set(
+            filter((lambda marker: marker != ''),
+                   self.get_option('exclude_markers', []).split(' ')))
         self.taskset_affinity = self.get_option('affinity', '')

         try:
@@ -83,8 +84,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
                 "No autostop plugin found, not adding instances criterion")

         self.predefined_phout = self.get_option(PhantomConfig.OPTION_PHOUT, '')
-        if not self.get_option(self.OPTION_CONFIG,
-                               '') and self.predefined_phout:
+        if not self.get_option(
+                self.OPTION_CONFIG, '') and self.predefined_phout:
             self.phout_import_mode = True

         if not self.config and not self.phout_import_mode:
@@ -111,23 +112,26 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
                     "Config check failed. Subprocess returned code %s" %
                     retcode)
             if result[2]:
-                raise RuntimeError("Subprocess returned message: %s" %
-                                   result[2])
+                raise RuntimeError(
+                    "Subprocess returned message: %s" % result[2])
             reader = PhantomReader(self.phantom.phout_file)
-            logger.debug("Linking sample reader to aggregator."
-                         " Reading samples from %s", self.phantom.phout_file)
-            logger.debug("Linking stats reader to aggregator."
-                         " Reading stats from %s", self.phantom.stat_log)
+            logger.debug(
+                "Linking sample reader to aggregator."
+                " Reading samples from %s", self.phantom.phout_file)
+            logger.debug(
+                "Linking stats reader to aggregator."
+                " Reading stats from %s", self.phantom.stat_log)
         else:
             reader = PhantomReader(self.predefined_phout)
-            logger.debug("Linking sample reader to aggregator."
-                         " Reading samples from %s", self.predefined_phout)
+            logger.debug(
+                "Linking sample reader to aggregator."
+                " Reading samples from %s", self.predefined_phout)

         if aggregator:
             aggregator.reader = reader
             info = self.phantom.get_info()
-            aggregator.stats_reader = PhantomStatsReader(self.phantom.stat_log,
-                                                         info)
+            aggregator.stats_reader = PhantomStatsReader(
+                self.phantom.stat_log, info)
             aggregator.add_result_listener(self)

         try:
@@ -151,27 +155,30 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
     def start_test(self):
         if not self.phout_import_mode:
             args = [self.phantom_path, 'run', self.config]
-            logger.debug("Starting %s with arguments: %s", self.phantom_path,
-                         args)
+            logger.debug(
+                "Starting %s with arguments: %s", self.phantom_path, args)
             if self.taskset_affinity != '':
-                args = [self.core.taskset_path, '-c', self.taskset_affinity
-                        ] + args
-                logger.debug("Enabling taskset for phantom with affinity: %s,"
-                             " cores count: %d", self.taskset_affinity,
-                             self.cpu_count)
+                args = [
+                    self.core.taskset_path, '-c', self.taskset_affinity
+                ] + args
+                logger.debug(
+                    "Enabling taskset for phantom with affinity: %s,"
+                    " cores count: %d", self.taskset_affinity, self.cpu_count)
             self.phantom_start_time = time.time()
-            phantom_stderr_file = self.core.mkstemp(".log",
-                                                    "phantom_stdout_stderr_")
+            phantom_stderr_file = self.core.mkstemp(
+                ".log", "phantom_stdout_stderr_")
             self.core.add_artifact_file(phantom_stderr_file)
             self.phantom_stderr = open(phantom_stderr_file, 'w')
-            self.process = subprocess.Popen(args,
-                                            stderr=self.phantom_stderr,
-                                            stdout=self.phantom_stderr,
-                                            close_fds=True)
+            self.process = subprocess.Popen(
+                args,
+                stderr=self.phantom_stderr,
+                stdout=self.phantom_stderr,
+                close_fds=True)
         else:
             if not os.path.exists(self.predefined_phout):
-                raise RuntimeError("Phout file not exists for import: %s" %
-                                   self.predefined_phout)
+                raise RuntimeError(
+                    "Phout file not exists for import: %s" %
+                    self.predefined_phout)
             logger.warn(
                 "Will import phout file instead of running phantom: %s",
                 self.predefined_phout)
@@ -180,14 +187,13 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
         if not self.phout_import_mode:
             retcode = self.process.poll()
             if retcode is not None:
-                logger.info("Phantom done its work with exit code: %s",
-                            retcode)
+                logger.info("Phantom done its work with exit code: %s", retcode)
                 return abs(retcode)
             else:
                 info = self.get_info()
                 if info:
-                    eta = int(info.duration) - (int(time.time()) -
-                                                int(self.phantom_start_time))
+                    eta = int(info.duration) - (
+                        int(time.time()) - int(self.phantom_start_time))
                     self.publish('eta', eta)
                 return -1
         else:
@@ -199,8 +205,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
     def end_test(self, retcode):
         if self.process and self.process.poll() is None:
-            logger.warn("Terminating phantom process with PID %s",
-                        self.process.pid)
+            logger.warn(
+                "Terminating phantom process with PID %s", self.process.pid)
             self.process.terminate()
         if self.process:
             self.process.communicate()
@@ -255,8 +261,7 @@ class UsedInstancesCriterion(AbstractCriterion):
         else:
             self.level = int(level_str)
             self.is_relative = False
-        self.seconds_limit = expand_to_seconds(param_str.split(',')[
-            1])
+        self.seconds_limit = expand_to_seconds(param_str.split(',')[1])

         try:
             phantom = autostop.core.get_plugin_of_type(Plugin)
@@ -264,8 +269,9 @@ class UsedInstancesCriterion(AbstractCriterion):
             if info:
                 self.threads_limit = info.instances
             if not self.threads_limit:
-                raise ValueError("Cannot create 'instances' criterion"
-                                 " with zero instances limit")
+                raise ValueError(
+                    "Cannot create 'instances' criterion"
+                    " with zero instances limit")
         except KeyError:
             logger.warning("No phantom module, 'instances' autostop disabled")
@@ -302,10 +308,12 @@ class UsedInstancesCriterion(AbstractCriterion):
         return level_str

     def explain(self):
-        items = (self.get_level_str(), self.seconds_count,
-                 self.cause_second[0].get('ts'))
-        return ("Testing threads (instances) utilization"
-                " higher than %s for %ss, since %s" % items)
+        items = (
+            self.get_level_str(), self.seconds_count,
+            self.cause_second[0].get('ts'))
+        return (
+            "Testing threads (instances) utilization"
+            " higher than %s for %ss, since %s" % items)

     def widget_explain(self):
         items = (self.get_level_str(), self.seconds_count, self.seconds_limit)

View File

@@ -10,7 +10,6 @@ import datetime
 import itertools as itt
 from StringIO import StringIO

-
 logger = logging.getLogger(__name__)

 phout_columns = [
@@ -38,10 +37,7 @@ dtypes = {
 def string_to_df(data):
     start_time = time.time()
     chunk = pd.read_csv(
-        StringIO(data),
-        sep='\t',
-        names=phout_columns,
-        dtype=dtypes)
+        StringIO(data), sep='\t', names=phout_columns, dtype=dtypes)

     chunk['receive_ts'] = chunk.send_ts + chunk.interval_real / 1e6
     chunk['receive_sec'] = chunk.receive_ts.astype(np.int64)
@@ -49,13 +45,11 @@ def string_to_df(data):
     chunk['tag'] = chunk.tag.str.rsplit('#', 1, expand=True)[0]
     chunk.set_index(['receive_sec'], inplace=True)

-    logger.debug("Chunk decode time: %.2fms",
-                 (time.time() - start_time) * 1000)
+    logger.debug("Chunk decode time: %.2fms", (time.time() - start_time) * 1000)

     return chunk

-
 class PhantomReader(object):
     def __init__(self, filename, cache_size=1024 * 1024 * 50):
         self.buffer = ""
         self.phout = open(filename, 'r')
@@ -87,7 +81,6 @@ class PhantomReader(object):

-
 class PhantomStatsReader(object):
     def __init__(self, filename, phantom_info):
         self.phantom_info = phantom_info
         self.buffer = ""
@@ -116,9 +109,13 @@ class PhantomStatsReader(object):
             reqps = 0
             if offset >= 0 and offset < len(self.phantom_info.steps):
                 reqps = self.phantom_info.steps[offset][0]
-            yield {'ts': chunk_date - 1,
-                   'metrics': {'instances': instances,
-                               'reqps': reqps}}
+            yield {
+                'ts': chunk_date - 1,
+                'metrics': {
+                    'instances': instances,
+                    'reqps': reqps
+                }
+            }

     def _read_stat_data(self, stat_file):
         chunk = stat_file.read(1024 * 1024 * 50)
@@ -128,10 +125,12 @@ class PhantomStatsReader(object):
             if len(parts) > 1:
                 ready_chunk = parts[0]
                 self.stat_buffer = parts[1]
-                chunks = [json.loads('{%s}}' % s)
-                          for s in ready_chunk.split('\n},')]
-                return list(itt.chain(*(self._decode_stat_data(chunk)
-                                        for chunk in chunks)))
+                chunks = [
+                    json.loads('{%s}}' % s) for s in ready_chunk.split('\n},')
+                ]
+                return list(
+                    itt.chain(
+                        *(self._decode_stat_data(chunk) for chunk in chunks)))
             else:
                 self.stat_buffer += stat_file.readline()
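
string_to_df above parses tab-separated phout samples with pandas. A self-contained sketch of the same idea; the two-column layout here is a made-up subset, the real phout_columns list is defined earlier in this file:

from StringIO import StringIO  # Python 2, matching the module above

import pandas as pd

data = "1482503000.123\t11000\n1482503001.456\t12000\n"
df = pd.read_csv(StringIO(data), sep='\t', names=['send_ts', 'interval_real'])
# derive the receive timestamp exactly as the module does
df['receive_ts'] = df.send_ts + df.interval_real / 1e6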

View File

@@ -4,16 +4,14 @@ from yandextank.plugins.Phantom.reader import PhantomReader

 class TestPhantomReader(object):
     def test_read_all(self):
         reader = PhantomReader(
-            'yandextank/plugins/Phantom/tests/phout.dat',
-            cache_size=1024)
+            'yandextank/plugins/Phantom/tests/phout.dat', cache_size=1024)
         df = pd.DataFrame()
         for chunk in reader:
             if chunk is None:
                 reader.close()
             else:
                 df = df.append(chunk)
-        assert(len(df) == 200)
-        assert(df['interval_real'].mean() == 11000714.0)
+        assert (len(df) == 200)
+        assert (df['interval_real'].mean() == 11000714.0)

View File

@@ -42,12 +42,14 @@ class PhantomConfig:
     @staticmethod
     def get_available_options():
-        opts = ["threads",
-                "phantom_modules_path",
-                "additional_libs",
-                "writelog",
-                "enum_ammo",
-                "timeout", ]
+        opts = [
+            "threads",
+            "phantom_modules_path",
+            "additional_libs",
+            "writelog",
+            "enum_ammo",
+            "timeout",
+        ]
         opts += StreamConfig.get_available_options()
         return opts
@@ -55,8 +57,8 @@ class PhantomConfig:
         """ Read phantom tool specific options """
         self.threads = self.get_option(
             "threads", str(int(multiprocessing.cpu_count() / 2) + 1))
-        self.phantom_modules_path = self.get_option("phantom_modules_path",
-                                                    "/usr/lib/phantom")
+        self.phantom_modules_path = self.get_option(
+            "phantom_modules_path", "/usr/lib/phantom")
         self.additional_libs = self.get_option("additional_libs", "")
         self.answ_log_level = self.get_option("writelog", "none")
         if self.answ_log_level == '0':
@@ -65,16 +67,17 @@ class PhantomConfig:
             self.answ_log_level = 'all'
         self.timeout = parse_duration(self.get_option("timeout", "11s"))
         if self.timeout > 120000:
-            logger.warning("You've set timeout over 2 minutes."
-                           " Are you a functional tester?")
+            logger.warning(
+                "You've set timeout over 2 minutes."
+                " Are you a functional tester?")
         self.answ_log = self.core.mkstemp(".log", "answ_")
         self.core.add_artifact_file(self.answ_log)
-        self.phout_file = self.core.get_option(self.SECTION, self.OPTION_PHOUT,
-                                               '')
+        self.phout_file = self.core.get_option(
+            self.SECTION, self.OPTION_PHOUT, '')
         if not self.phout_file:
             self.phout_file = self.core.mkstemp(".log", "phout_")
-            self.core.set_option(self.SECTION, self.OPTION_PHOUT,
-                                 self.phout_file)
+            self.core.set_option(
+                self.SECTION, self.OPTION_PHOUT, self.phout_file)
         self.core.add_artifact_file(self.phout_file)
         self.stat_log = self.core.mkstemp(".log", "phantom_stat_")
         self.core.add_artifact_file(self.stat_log)
@@ -82,14 +85,17 @@ class PhantomConfig:
         self.core.add_artifact_file(self.phantom_log)

         main_stream = StreamConfig(
-            self.core, len(self.streams), self.phout_file, self.answ_log,
+            self.core,
+            len(self.streams), self.phout_file, self.answ_log,
             self.answ_log_level, self.timeout, self.SECTION)
         self.streams.append(main_stream)

         for section in self.core.config.find_sections(self.SECTION + '-'):
-            self.streams.append(StreamConfig(
-                self.core, len(self.streams), self.phout_file, self.answ_log,
-                self.answ_log_level, self.timeout, section))
+            self.streams.append(
+                StreamConfig(
+                    self.core,
+                    len(self.streams), self.phout_file, self.answ_log,
+                    self.answ_log_level, self.timeout, section))

         for stream in self.streams:
             stream.read_config()
@@ -175,8 +181,8 @@ class PhantomConfig:
             result.ammo_file += stream.stepper_wrapper.ammo_file + ' '
             result.ammo_count += stream.stepper_wrapper.ammo_count
-            result.duration = max(result.duration,
-                                  stream.stepper_wrapper.duration)
+            result.duration = max(
+                result.duration, stream.stepper_wrapper.duration)
             result.instances += stream.instances

         if not result.ammo_count:
@@ -189,8 +195,8 @@ class StreamConfig:
     OPTION_INSTANCES_LIMIT = 'instances'

-    def __init__(self, core, sequence, phout, answ, answ_level, timeout,
-                 section):
+    def __init__(
+            self, core, sequence, phout, answ, answ_level, timeout, section):
         self.core = core
         self.address_wizard = AddressWizard()
@@ -229,10 +235,14 @@ class StreamConfig:
     @staticmethod
     def get_available_options():
-        opts = ["ssl", "tank_type", 'gatling_ip', "method_prefix",
-                "source_log_prefix"]
-        opts += ["phantom_http_line", "phantom_http_field_num",
-                 "phantom_http_field", "phantom_http_entity"]
+        opts = [
+            "ssl", "tank_type", 'gatling_ip', "method_prefix",
+            "source_log_prefix"
+        ]
+        opts += [
+            "phantom_http_line", "phantom_http_field_num", "phantom_http_field",
+            "phantom_http_entity"
+        ]
         opts += ['address', "port", StreamConfig.OPTION_INSTANCES_LIMIT]
         opts += StepperWrapper.get_available_options()
         opts += ["connection_test"]
@@ -245,16 +255,16 @@ class StreamConfig:
         self.tank_type = self.get_option("tank_type", 'http')
         # TODO: refactor. Maybe we should decide how to interact with
         # StepperWrapper here.
-        self.instances = int(self.get_option(self.OPTION_INSTANCES_LIMIT,
-                                             '1000'))
+        self.instances = int(
+            self.get_option(self.OPTION_INSTANCES_LIMIT, '1000'))
         self.gatling = ' '.join(self.get_option('gatling_ip', '').split("\n"))
         self.method_prefix = self.get_option("method_prefix", 'method_stream')
         self.method_options = self.get_option("method_options", '')
         self.source_log_prefix = self.get_option("source_log_prefix", '')
         self.phantom_http_line = self.get_option("phantom_http_line", "")
-        self.phantom_http_field_num = self.get_option("phantom_http_field_num",
-                                                      "")
+        self.phantom_http_field_num = self.get_option(
+            "phantom_http_field_num", "")
         self.phantom_http_field = self.get_option("phantom_http_field", "")
         self.phantom_http_entity = self.get_option("phantom_http_entity", "")
@@ -264,8 +274,8 @@ class StreamConfig:
         self.ipv6, self.resolved_ip, self.port, self.address = self.address_wizard.resolve(
             self.address, do_test_connect, explicit_port)
-        logger.info("Resolved %s into %s:%s", self.address, self.resolved_ip,
-                    self.port)
+        logger.info(
+            "Resolved %s into %s:%s", self.address, self.resolved_ip, self.port)

         self.client_cipher_suites = self.get_option("client_cipher_suites", "")
         self.client_certificate = self.get_option("client_certificate", "")
@@ -345,11 +355,12 @@ class StreamConfig:
             fname = 'phantom_benchmark_main.tpl'
         else:
             fname = 'phantom_benchmark_additional.tpl'
-        template_str = template_str = resource_string(__name__,
-                                                      "config/" + fname)
+        template_str = template_str = resource_string(
+            __name__, "config/" + fname)
         tpl = string.Template(template_str)
         config = tpl.substitute(kwargs)
         return config

 # ========================================================================
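
The template rendering above fills phantom config templates via string.Template. A minimal sketch of that mechanism with a hypothetical template (the real ones are the phantom_benchmark_*.tpl resources):

import string

template_str = "bind = ${address}:${port}\ninstances = ${instances}\n"
tpl = string.Template(template_str)
# substitute() raises KeyError if a placeholder has no matching argument
config = tpl.substitute(address="localhost", port="80", instances="1000")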

View File

@@ -53,8 +53,9 @@ class PhantomProgressBarWidget(AbstractInfoWidget):
         elif self.ammo_progress:
             left_part = self.ammo_count - self.ammo_progress
             if left_part > 0:
-                eta_secs = int(float(dur_seconds) / float(self.ammo_progress) *
-                               float(left_part))
+                eta_secs = int(
+                    float(dur_seconds) / float(self.ammo_progress) *
+                    float(left_part))
             else:
                 eta_secs = 0
             eta_time = datetime.timedelta(seconds=eta_secs)
@@ -77,8 +78,8 @@ class PhantomProgressBarWidget(AbstractInfoWidget):
             progress_chars += self.krutilka.next()
             res += color_bg + progress_chars + screen.markup.RESET + color_fg
-        res += '~' * (pb_width - int(pb_width *
-                                     progress)) + screen.markup.RESET + ' '
+        res += '~' * (pb_width - int(pb_width * progress)
+                      ) + screen.markup.RESET + ' '
         res += str_perc + "\n"

         eta = 'ETA: %s' % eta_time
@@ -123,17 +124,17 @@ class PhantomInfoWidget(AbstractInfoWidget):
         info = self.owner.get_info()
         if self.owner.phantom:
             template = "Hosts: %s => %s:%s\n Ammo: %s\nCount: %s\n Load: %s"
-            data = (socket.gethostname(), info.address, info.port,
-                    os.path.basename(info.ammo_file), self.ammo_count,
-                    ' '.join(info.rps_schedule))
+            data = (
+                socket.gethostname(), info.address, info.port,
+                os.path.basename(info.ammo_file), self.ammo_count,
+                ' '.join(info.rps_schedule))
             res = template % data

         res += "\n\n"
         res += "Active instances: "
         if float(self.instances) / self.instances_limit > 0.8:
-            res += screen.markup.RED + str(
-                self.instances) + screen.markup.RESET
+            res += screen.markup.RED + str(self.instances) + screen.markup.RESET
         elif float(self.instances) / self.instances_limit > 0.5:
             res += screen.markup.YELLOW + str(
                 self.instances) + screen.markup.RESET
@@ -141,8 +142,7 @@ class PhantomInfoWidget(AbstractInfoWidget):
             res += str(self.instances)

         res += "\nPlanned requests: %s for %s\nActual responses: " % (
-            self.planned,
-            datetime.timedelta(seconds=self.planned_rps_duration))
+            self.planned, datetime.timedelta(seconds=self.planned_rps_duration))
         if not self.planned == self.RPS:
             res += screen.markup.YELLOW + str(self.RPS) + screen.markup.RESET
         else:
@@ -150,22 +150,22 @@ class PhantomInfoWidget(AbstractInfoWidget):
         res += "\n Accuracy: "
         if self.selfload < 80:
-            res += screen.markup.RED + ('%.2f' %
-                                        self.selfload) + screen.markup.RESET
+            res += screen.markup.RED + (
+                '%.2f' % self.selfload) + screen.markup.RESET
         elif self.selfload < 95:
-            res += screen.markup.YELLOW + ('%.2f' %
-                                           self.selfload) + screen.markup.RESET
+            res += screen.markup.YELLOW + (
+                '%.2f' % self.selfload) + screen.markup.RESET
         else:
             res += ('%.2f' % self.selfload)

         res += "%\n Time lag: "
         if self.time_lag > self.owner.buffered_seconds * 5:
             logger.debug("Time lag: %s", self.time_lag)
-            res += screen.markup.RED + str(datetime.timedelta(
-                seconds=self.time_lag)) + screen.markup.RESET
+            res += screen.markup.RED + str(
+                datetime.timedelta(seconds=self.time_lag)) + screen.markup.RESET
         elif self.time_lag > self.owner.buffered_seconds:
-            res += screen.markup.YELLOW + str(datetime.timedelta(
-                seconds=self.time_lag)) + screen.markup.RESET
+            res += screen.markup.YELLOW + str(
+                datetime.timedelta(seconds=self.time_lag)) + screen.markup.RESET
         else:
             res += str(datetime.timedelta(seconds=self.time_lag))
@@ -175,6 +175,8 @@ class PhantomInfoWidget(AbstractInfoWidget):
         self.RPS = data["overall"]["interval_real"]["len"]
         self.planned = stats["metrics"]["reqps"]
         self.instances = stats["metrics"]["instances"]

         # TODO:
         # self.selfload = second_aggregate_data.overall.selfload
         # self.time_lag = int(time.time() - time.mktime(

View File

@@ -27,7 +27,8 @@ class Plugin(AbstractPlugin):
         self.default_target = None

         def _echo_wrapper(cmd):
             return 'echo "====Executing: {cmd}"; {cmd}'.format(cmd=cmd)
+
         cmds = {
             "dpkg": "dpkg -l",
             "uname": "uname -a",
@@ -59,8 +60,7 @@ class Plugin(AbstractPlugin):
             self.timeout = int(self.get_option("timeout", 3))
         except:
             logger.error(
-                'Exception trying to configure Platform plugin',
-                exc_info=True)
+                'Exception trying to configure Platform plugin', exc_info=True)
         self.logfile = self.core.mkstemp(".log", "platform_")
         self.core.add_artifact_file(self.logfile)
@@ -84,8 +84,7 @@ class Plugin(AbstractPlugin):
             out, errors, err_code = self.ssh.execute(self.cmd)
         except Exception:
             logger.warning(
-                "Failed to check remote system information at %s:%s",
-                host,
+                "Failed to check remote system information at %s:%s", host,
                 self.port)
             logger.debug(
                 "Failed to check remote system information at %s:%s",

View File

@@ -28,8 +28,8 @@ class Plugin(AbstractPlugin):
         return ["interval", "disk_limit", "mem_limit"]

     def configure(self):
-        self.interval = expand_to_seconds(self.get_option(
-            "interval", self.interval))
+        self.interval = expand_to_seconds(
+            self.get_option("interval", self.interval))
         self.disk_limit = int(self.get_option("disk_limit", self.disk_limit))
         self.mem_limit = int(self.get_option("mem_limit", self.mem_limit))
@@ -58,18 +58,20 @@ class Plugin(AbstractPlugin):
             self.log.debug("No disk usage info: %s", res[2])
             return
         disk_free = res[1]
-        self.log.debug("Disk free space: %s/%s", disk_free.strip(),
-                       self.disk_limit)
+        self.log.debug(
+            "Disk free space: %s/%s", disk_free.strip(), self.disk_limit)
         if int(disk_free.strip()) < self.disk_limit:
             raise RuntimeError(
-                "Not enough local resources: disk space less than %sMB in %s: %sMB" %
-                (self.disk_limit, self.core.artifacts_base_dir, int(
-                    disk_free.strip())))
+                "Not enough local resources: disk space less than %sMB in %s: %sMB"
+                % (
+                    self.disk_limit, self.core.artifacts_base_dir,
+                    int(disk_free.strip())))

     def __check_mem(self):
         ''' raise exception on RAM exceeded '''
         mem_free = psutil.virtual_memory().available / 2**20
         self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
         if mem_free < self.mem_limit:
-            raise RuntimeError("Not enough resources: free memory less "
-                               "than %sMB: %sMB" % (self.mem_limit, mem_free))
+            raise RuntimeError(
+                "Not enough resources: free memory less "
+                "than %sMB: %sMB" % (self.mem_limit, mem_free))

View File

@@ -47,13 +47,14 @@ class Plugin(AbstractPlugin):
     def is_test_finished(self):
         if self.poll:
             self.log.info("Executing: %s", self.poll)
-            retcode = util.execute(self.poll,
-                                   shell=True,
-                                   poll_period=0.1,
-                                   catch_out=self.catch_out)[0]
+            retcode = util.execute(
+                self.poll,
+                shell=True,
+                poll_period=0.1,
+                catch_out=self.catch_out)[0]
             if retcode:
-                self.log.warn("Non-zero exit code, interrupting test: %s",
-                              retcode)
+                self.log.warn(
+                    "Non-zero exit code, interrupting test: %s", retcode)
                 return retcode
         return -1
@@ -72,9 +73,7 @@ class Plugin(AbstractPlugin):
         Execute and check exit code
         '''
         self.log.info("Executing: %s", cmd)
-        retcode = util.execute(cmd,
-                               shell=True,
-                               poll_period=0.1,
-                               catch_out=self.catch_out)[0]
+        retcode = util.execute(
+            cmd, shell=True, poll_period=0.1, catch_out=self.catch_out)[0]
         if retcode:
             raise RuntimeError("Subprocess returned %s" % retcode)

View File

@@ -7,12 +7,18 @@ import matplotlib
 matplotlib.use("Agg")
 import matplotlib.pyplot as plt  # noqa:E402

 _ALL_ = "All"
 _CHARTSETS = {
-    "cpu-cpu-": {"CPU": _ALL_},
-    "net-": {"Network": {"bytes_sent", "bytes_recv"}},
-    "diskio-": {"Disk IO": {"read_bytes", "write_bytes"}, "Disk latency": {"read_time", "write_time"}},
+    "cpu-cpu-": {
+        "CPU": _ALL_
+    },
+    "net-": {
+        "Network": {"bytes_sent", "bytes_recv"}
+    },
+    "diskio-": {
+        "Disk IO": {"read_bytes", "write_bytes"},
+        "Disk latency": {"read_time", "write_time"}
+    },
 }
 _CUSTOM_PREFIX = "custom:"
 _REPORT_FILE_OPTION = "report_file"
@@ -33,7 +39,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
     def configure(self):
         self.__report_path = self.get_option(_REPORT_FILE_OPTION, "report.svg")
-        if os.path.split(self.__report_path)[0] or os.path.splitdrive(self.__report_path)[0]:
+        if os.path.split(self.__report_path)[0] or os.path.splitdrive(
+                self.__report_path)[0]:
             raise Exception("Only simple file names supported")

         self.__shooting_data = []
@@ -50,7 +57,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
     def post_process(self, retcode):
         monitoring_chartsets = self.__get_monitoring_chartsets()
-        min_x = self.__shooting_data[0]["ts"]  # sync start of shooting and start of monitoring
+        min_x = self.__shooting_data[0][
+            "ts"]  # sync start of shooting and start of monitoring

         seaborn.set(style="whitegrid", palette="Set2")
         seaborn.despine()
@@ -67,7 +75,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         plt.gca().legend(fontsize="x-small")

         # monitoring
-        for plot_num, chartset_data in enumerate(sorted(monitoring_chartsets.iteritems()), 1):
+        for plot_num, chartset_data in enumerate(
+                sorted(monitoring_chartsets.iteritems()), 1):
             chartset_title, signals = chartset_data

             plt.subplot(plot_count, 1, plot_num + 1)
@@ -96,9 +105,12 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         for chartset_prefix, chartset_data in _CHARTSETS.iteritems():
             if signal_prefix.startswith(chartset_prefix):
-                for chartset_title, chartset_signals in chartset_data.iteritems():
+                for chartset_title, chartset_signals in chartset_data.iteritems(
+                ):
                     if chartset_signals is _ALL_ or signal_suffix in chartset_signals:
-                        return "{} {}".format(chartset_title, signal_prefix[len(chartset_prefix):])
+                        return "{} {}".format(
+                            chartset_title,
+                            signal_prefix[len(chartset_prefix):])
                 else:
                     return None
         else:
@@ -115,11 +127,13 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
                 continue

             signal_prefix, signal_suffix = signal_name.split("_", 1)
-            chartset_title = self.__find_monitoring_chartset(signal_prefix, signal_suffix)
+            chartset_title = self.__find_monitoring_chartset(
+                signal_prefix, signal_suffix)
             if not chartset_title:
                 continue

-            chartsets.setdefault((chartset_title), set()).add((signal_name, signal_suffix))
+            chartsets.setdefault((chartset_title), set()).add(
+                (signal_name, signal_suffix))

         return chartsets
@@ -142,7 +156,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
         y = {}
         for data in self.__shooting_data:
             timestamp = data["ts"]
-            for variant, count in data["overall"][signal_name]["count"].iteritems():
+            for variant, count in data["overall"][signal_name][
+                    "count"].iteritems():
                 x.setdefault(variant, []).append(timestamp - min_x)
                 y.setdefault(variant, []).append(count)
         return x, y
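
The plugin above draws with the non-interactive Agg backend so it can run on a headless tank. A minimal sketch of producing an SVG the same way (the plotted data is made up; the file name mirrors the plugin default):

import matplotlib
matplotlib.use("Agg")  # must be selected before importing pyplot
import matplotlib.pyplot as plt  # noqa:E402

plt.plot([0, 1, 2], [10, 20, 15], label="rps")
plt.gca().legend(fontsize="x-small")
plt.savefig("report.svg")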

View File

@@ -13,7 +13,6 @@ import time
 from optparse import OptionParser
 import Queue as q

-
 logger = logging.getLogger("agent")
 collector_logger = logging.getLogger("telegraf")
@@ -82,34 +81,35 @@ class Consolidator(object):
                     if data['name'] == 'diskio':
                         data['name'] = "{metric_name}-{disk_id}".format(
                             metric_name=data['name'],
-                            disk_id=data['tags']['name']
-                        )
+                            disk_id=data['tags']['name'])
                     elif data['name'] == 'net':
-                        data['name'] = "{metric_name}-{interface}".format(
-                            metric_name=data['name'], interface=data['tags']['interface'])
+                        data[
+                            'name'] = "{metric_name}-{interface}".format(
+                                metric_name=data['name'],
+                                interface=data['tags']['interface'])
                     elif data['name'] == 'cpu':
                         data['name'] = "{metric_name}-{cpu_id}".format(
                             metric_name=data['name'],
-                            cpu_id=data['tags']['cpu']
-                        )
+                            cpu_id=data['tags']['cpu'])
                     key = data['name'] + "_" + key
                     if key.endswith('_exec_value'):
                         key = key.replace('_exec_value', '')
                     self.results[ts][key] = value
                 except KeyError:
                     logger.error(
-                        'Malformed json from source: %s', chunk, exc_info=True)
+                        'Malformed json from source: %s',
+                        chunk,
+                        exc_info=True)
                 except:
                     logger.error(
                         'Something nasty happend in consolidator work',
                         exc_info=True)
             if len(self.results) > 5:
                 ready_to_go_index = min(self.results)
-                yield json.dumps(
-                    {
-                        ready_to_go_index: self.results.pop(ready_to_go_index, None)
-                    }
-                )
+                yield json.dumps({
+                    ready_to_go_index:
+                    self.results.pop(ready_to_go_index, None)
+                })


 class Drain(threading.Thread):
@@ -139,7 +139,6 @@ class Drain(threading.Thread):

-
 class AgentWorker(threading.Thread):
     def __init__(self, telegraf_path):
         super(AgentWorker, self).__init__()
         self.working_dir = os.path.dirname(__file__)
@@ -167,8 +166,7 @@ class AgentWorker(threading.Thread):
             shell=True,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
-            stdin=subprocess.PIPE,
-        )
+            stdin=subprocess.PIPE, )

     def read_startup_config(self, cfg_file='agent_startup.cfg'):
         try:
@@ -188,12 +186,10 @@ class AgentWorker(threading.Thread):
             logger.info(
                 'Successfully loaded startup config.\n'
                 'Startups: %s\n'
-                'Shutdowns: %s\n', self.startups, self.shutdowns
-            )
+                'Shutdowns: %s\n', self.startups, self.shutdowns)
         except:
             logger.error(
-                'Error trying to read agent startup config',
-                exc_info=True)
+                'Error trying to read agent startup config', exc_info=True)

     def run(self):
         logger.info("Running startup commands")
@@ -204,9 +200,7 @@ class AgentWorker(threading.Thread):
         logger.info('Starting metrics collector..')
         cmnd = "{telegraf} -config {working_dir}/agent.cfg".format(
-            telegraf=self.telegraf_path,
-            working_dir=self.working_dir
-        )
+            telegraf=self.telegraf_path, working_dir=self.working_dir)
         self.collector = self.popen(cmnd)

         telegraf_output = self.working_dir + '/monitoring.rawdata'
@@ -218,23 +212,17 @@ class AgentWorker(threading.Thread):
             time.sleep(1)

         self.drain = Drain(
-            Consolidator(
-                DataReader(telegraf_output)
-            ),
-            self.results
-        )
+            Consolidator(DataReader(telegraf_output)), self.results)
         self.drain.start()

         self.drain_stdout = Drain(
-            DataReader(self.collector.stdout, pipe=True),
-            self.results_stdout
-        )
+            DataReader(
+                self.collector.stdout, pipe=True), self.results_stdout)
         self.drain_stdout.start()

         self.drain_err = Drain(
-            DataReader(self.collector.stderr, pipe=True),
-            self.results_err
-        )
+            DataReader(
+                self.collector.stderr, pipe=True), self.results_err)
         self.drain_err.start()

         while not self.finished:
@@ -243,9 +231,7 @@ class AgentWorker(threading.Thread):
                     data = self.results.get_nowait()
                     logger.debug(
                         'send %s bytes of data to collector', len(data))
-                    sys.stdout.write(
-                        str(data) + '\n'
-                    )
+                    sys.stdout.write(str(data) + '\n')
                 except q.Empty:
                     break
             except:
@@ -297,16 +283,21 @@ class AgentWorker(threading.Thread):

 def main():
     fname = os.path.dirname(__file__) + "/_agent.log"
     logging.basicConfig(
-        level=logging.DEBUG, filename=fname,
+        level=logging.DEBUG,
+        filename=fname,
         format='%(asctime)s [%(levelname)s] %(name)s:%(lineno)d %(message)s')

     parser = OptionParser()
     parser.add_option(
-        "", "--telegraf", dest="telegraf_path",
+        "",
+        "--telegraf",
+        dest="telegraf_path",
         help="telegraf_path",
         default="/tmp/telegraf")
     parser.add_option(
-        "", "--host", dest="hostname_path",
+        "",
+        "--host",
+        dest="hostname_path",
         help="telegraf_path",
         default="/usr/bin/telegraf")
     (options, args) = parser.parse_args()
@@ -315,24 +306,24 @@ def main():
     customs_script = os.path.dirname(__file__) + '/agent_customs.sh'
     try:
         logger.info(
-            'Trying to make telegraf executable: %s',
-            options.telegraf_path)
+            'Trying to make telegraf executable: %s', options.telegraf_path)
         # 0o755 compatible with old python versions. 744 is NOT enough
         os.chmod(options.telegraf_path, 493)
     except OSError:
         logger.warning(
             'Unable to set %s access rights to execute.',
-            options.telegraf_path, exc_info=True)
+            options.telegraf_path,
+            exc_info=True)

     try:
         logger.info(
-            'Trying to make customs script executable: %s',
-            customs_script)
+            'Trying to make customs script executable: %s', customs_script)
         # 0o755 compatible with old python versions. 744 is NOT enough
         os.chmod(customs_script, 493)
     except OSError:
         logger.warning(
             'Unable to set %s access rights to execute.',
-            customs_script, exc_info=True)
+            customs_script,
+            exc_info=True)

     worker = AgentWorker(options.telegraf_path)
     worker.read_startup_config()
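
The Consolidator above buffers metrics per timestamp and flushes the oldest second once more than five are pending, so slightly out-of-order readings can still be merged. A simplified sketch of that flush rule (the data shape is reduced to ts/key/value):

import json

results = {}  # ts -> {metric_key: value}

def consume(ts, key, value):
    results.setdefault(ts, {})[key] = value
    if len(results) > 5:
        oldest = min(results)
        # emit the oldest buffered second as one JSON chunk
        return json.dumps({oldest: results.pop(oldest, None)})
    return None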

View File

@@ -58,10 +58,8 @@ class LocalhostClient(object):
         customs_script = self.config.create_custom_exec_script()
         try:
             copyfile(
-                self.path['AGENT_LOCAL_FOLDER'] +
-                '/agent.py',
-                self.workdir +
-                '/agent.py')
+                self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
+                self.workdir + '/agent.py')
             copyfile(agent_config, self.workdir + '/agent.cfg')
             copyfile(startup_config, self.workdir + '/agent_startup.cfg')
             copyfile(customs_script, self.workdir + '/agent_customs.sh')
@@ -90,8 +88,7 @@ class LocalhostClient(object):
             shell=True,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
-            stdin=subprocess.PIPE,
-        )
+            stdin=subprocess.PIPE, )

     def start(self):
         """Start local agent"""
@@ -102,9 +99,7 @@ class LocalhostClient(object):
             telegraf_path=self.path['TELEGRAF_LOCAL_PATH'],
             host=self.host)
         self.session = self.popen(command)
-        self.reader_thread = threading.Thread(
-            target=self.read_buffer
-        )
+        self.reader_thread = threading.Thread(target=self.read_buffer)
         self.reader_thread.setDaemon(True)
         return self.session
@@ -123,8 +118,8 @@ class LocalhostClient(object):
             except ValueError:
                 logger.debug(
                     'this exc most likely raised during interpreter shutdown\n'
-                    'otherwise something really nasty happend', exc_info=True
-                )
+                    'otherwise something really nasty happend',
+                    exc_info=True)

     def uninstall(self):
         """
@@ -182,8 +177,8 @@ class SSHClient(object):

     def install(self):
         """Create folder and copy agent and metrics scripts to remote host"""
-        logger.info("Installing monitoring agent at %s@%s...", self.username,
-                    self.host)
+        logger.info(
+            "Installing monitoring agent at %s@%s...", self.username, self.host)
         # create remote temp dir
         cmd = self.python + ' -c "import tempfile; print tempfile.mkdtemp();"'
@@ -210,9 +205,7 @@ class SSHClient(object):
         if remote_dir:
             self.path['AGENT_REMOTE_FOLDER'] = remote_dir
         logger.debug(
-            "Remote dir at %s:%s",
-            self.host,
-            self.path['AGENT_REMOTE_FOLDER'])
+            "Remote dir at %s:%s", self.host, self.path['AGENT_REMOTE_FOLDER'])

         # create collector config
         agent_config = self.config.create_collector_config(
@@ -225,9 +218,7 @@ class SSHClient(object):
         # support string formatting without indices
         remote_cmd = 'import os; print os.path.isfile("' + self.path[
             'TELEGRAF_REMOTE_PATH'] + '")'
-        cmd = self.python + ' -c \'{cmd}\''.format(
-            cmd=remote_cmd
-        )
+        cmd = self.python + ' -c \'{cmd}\''.format(cmd=remote_cmd)
         remote_telegraf_exists = "False"
         try:
             out, err, err_code = self.ssh.execute(cmd)
@@ -255,37 +246,30 @@ class SSHClient(object):
                     self.path['TELEGRAF_REMOTE_PATH'])
             elif os.path.isfile("/usr/bin/telegraf"):
                 self.ssh.send_file(
-                    '/usr/bin/telegraf', self.path['TELEGRAF_REMOTE_PATH']
-                )
+                    '/usr/bin/telegraf', self.path['TELEGRAF_REMOTE_PATH'])
             else:
                 logger.error(
                     'Telegraf binary not found neither on %s nor on localhost at specified path: %s\n'
                     'You can download telegraf binaries here: https://github.com/influxdata/telegraf\n'
-                    'or install debian package: `telegraf`', self.host, self.path['TELEGRAF_LOCAL_PATH'])
+                    'or install debian package: `telegraf`', self.host,
+                    self.path['TELEGRAF_LOCAL_PATH'])
                 return None, None, None

             self.ssh.send_file(
                 self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
-                self.path['AGENT_REMOTE_FOLDER'] + '/agent.py'
-            )
+                self.path['AGENT_REMOTE_FOLDER'] + '/agent.py')
             self.ssh.send_file(
-                agent_config,
-                self.path['AGENT_REMOTE_FOLDER'] +
-                '/agent.cfg')
+                agent_config, self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg')
             self.ssh.send_file(
                 startup_config,
-                self.path['AGENT_REMOTE_FOLDER'] +
-                '/agent_startup.cfg')
+                self.path['AGENT_REMOTE_FOLDER'] + '/agent_startup.cfg')
             self.ssh.send_file(
                 customs_script,
-                self.path['AGENT_REMOTE_FOLDER'] +
-                '/agent_customs.sh')
+                self.path['AGENT_REMOTE_FOLDER'] + '/agent_customs.sh')
         except Exception:
             logger.error(
-                "Failed to install agent on %s",
-                self.host,
-                exc_info=True)
+                "Failed to install agent on %s", self.host, exc_info=True)
             return None, None, None

         return agent_config, startup_config, customs_script
@@ -300,9 +284,7 @@ class SSHClient(object):
             host=self.host)
         logging.debug('Command to start agent: %s', command)
         self.session = self.ssh.async_session(command)
-        self.reader_thread = threading.Thread(
-            target=self.read_buffer
-        )
+        self.reader_thread = threading.Thread(target=self.read_buffer)
         self.reader_thread.setDaemon(True)
         return self.session
@@ -339,12 +321,9 @@ class SSHClient(object):
                 exc_info=True)
         try:
             self.ssh.get_file(
-                self.path['AGENT_REMOTE_FOLDER'] +
-                "/_agent.log",
-                log_filename)
+                self.path['AGENT_REMOTE_FOLDER'] + "/_agent.log", log_filename)
             self.ssh.get_file(
-                self.path['AGENT_REMOTE_FOLDER'] +
-                "/monitoring.rawdata",
-                data_filename)
+                self.path['AGENT_REMOTE_FOLDER'] + "/monitoring.rawdata",
+                data_filename)
             self.ssh.rm_r(self.path['AGENT_REMOTE_FOLDER'])
         except Exception:

View File

@@ -38,10 +38,7 @@ class MonitoringCollector(object):
         self.load_start_time = None
         self.config_manager = ConfigManager()
         self.old_style_configs = False
-        self.clients = {
-            'localhost': LocalhostClient,
-            'ssh': SSHClient
-        }
+        self.clients = {'localhost': LocalhostClient, 'ssh': SSHClient}

     def add_listener(self, obj):
         self.listeners.append(obj)
@@ -103,8 +100,9 @@ class MonitoringCollector(object):
             }
             self.send_data.append(ready_to_send)

-        logger.debug('Polling/decoding agents data took: %.2fms',
-                     (time.time() - start_time) * 1000)
+        logger.debug(
+            'Polling/decoding agents data took: %.2fms',
+            (time.time() - start_time) * 1000)

         collected_data_length = len(self.send_data)
@@ -132,8 +130,10 @@ class MonitoringCollector(object):
     def send_collected_data(self):
         """sends pending data set to listeners"""
-        [listener.monitoring_data(self.send_data)
-         for listener in self.listeners]
+        [
+            listener.monitoring_data(self.send_data)
+            for listener in self.listeners
+        ]
         self.send_data = []

View File

@@ -50,20 +50,22 @@ class ConfigManager(object):
             },
             "Memory": {
                 "name": '[inputs.mem]',
-                "fielddrop": '["active", "inactive", "total", "used_per*", "avail*"]',
+                "fielddrop":
+                '["active", "inactive", "total", "used_per*", "avail*"]',
             },
             "Disk": {
                 "name": '[inputs.diskio]',
                 "devices": '[{devices}]'.format(
-                    devices=",".join(['"vda%s","sda%s"' % (num, num) for num in range(6)])
-                ),
+                    devices=",".join(
+                        ['"vda%s","sda%s"' % (num, num) for num in range(6)])),
             },
             "Net": {
                 "name": '[inputs.net]',
                 "interfaces": '[{interfaces}]'.format(
-                    interfaces=",".join(['"eth%s"' % (num) for num in range(6)])
-                ),
-                "fielddrop": '["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]',
+                    interfaces=",".join(
+                        ['"eth%s"' % (num) for num in range(6)])),
+                "fielddrop":
+                '["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]',
             },
             "Nstat": {
                 "name": '[inputs.nstat]',
@@ -89,17 +91,15 @@ class ConfigManager(object):
         }
         defaults_enabled = ['CPU', 'Memory', 'Disk', 'Net', 'System', 'Kernel']
         defaults_boolean = [
-            'percpu',
-            'round_interval',
-            'fielddrop',
-            'fieldpass',
-            'interfaces',
-            'devices']
+            'percpu', 'round_interval', 'fielddrop', 'fieldpass', 'interfaces',
+            'devices'
+        ]
         hostname = host.get('address').lower()
         if hostname == '[target]':
             if not target_hint:
                 raise ValueError(
-                    "Can't use `[target]` keyword with no target parameter specified")
+                    "Can't use `[target]` keyword with no target parameter specified"
+                )
             logger.debug("Using target hint: %s", target_hint)
             hostname = target_hint.lower()
         custom = []
@@ -113,14 +113,12 @@ class ConfigManager(object):
                 if key != 'name' and key not in defaults_boolean:
                     value = metric.get(key, None)
                     if value:
-                        defaults[
-                            metric.tag][key] = "'{value}'".format(
-                                value=value)
+                        defaults[metric.tag][key] = "'{value}'".format(
+                            value=value)
                 elif key in defaults_boolean:
                     value = metric.get(key, None)
                     if value:
-                        defaults[
-                            metric.tag][key] = "{value}".format(
-                                value=value)
+                        defaults[metric.tag][key] = "{value}".format(
+                            value=value)
             host_config[metric.tag] = defaults[metric.tag]
         # custom metrics
@@ -186,11 +184,15 @@ class AgentConfig(object):
         # FIXME incinerate such a string formatting inside a method call
         # T_T
         config.add_section('startup')
-        [config.set('startup', "cmd%s" % idx, cmd)
-         for idx, cmd in enumerate(self.startups)]
+        [
+            config.set('startup', "cmd%s" % idx, cmd)
+            for idx, cmd in enumerate(self.startups)
+        ]
         config.add_section('shutdown')
-        [config.set('shutdown', "cmd%s" % idx, cmd)
-         for idx, cmd in enumerate(self.shutdowns)]
+        [
+            config.set('shutdown', "cmd%s" % idx, cmd)
+            for idx, cmd in enumerate(self.shutdowns)
+        ]

         with open(cfg_path, 'w') as fds:
             config.write(fds)
@@ -210,16 +212,14 @@ class AgentConfig(object):
         if os.path.isfile(cfg_path):
             logger.info(
                 'Found agent custom execs config file in working directory with the same name as created for host %s.\n'
-                'Creating new one via tempfile. This will affect predictable filenames for agent artefacts', self.host)
+                'Creating new one via tempfile. This will affect predictable filenames for agent artefacts',
+                self.host)
             handle, cfg_path = tempfile.mkstemp('.sh', 'agent_customs_')
             os.close(handle)

         cmds = ""
         for idx, cmd in enumerate(self.custom):
-            cmds += "-{idx}) {cmd};;\n".format(
-                idx=idx,
-                cmd=cmd['cmd']
-            )
+            cmds += "-{idx}) {cmd};;\n".format(idx=idx, cmd=cmd['cmd'])

         customs_script = """
 #!/bin/sh
 while :
@@ -263,8 +263,7 @@ class AgentConfig(object):
         config.set(
             "agent",
             "interval",
-            "'{interval}s'".format(
-                interval=self.interval))
+            "'{interval}s'".format(interval=self.interval))
         config.set("agent", "round_interval", "true")
         config.set("agent", "flush_interval", "'1s'")
         config.set("agent", "collection_jitter", "'0s'")
@@ -280,9 +279,10 @@ class AgentConfig(object):
                 if key != 'name':
                     config.set(
                         "{section_name}".format(
-                            section_name=self.host_config[section]['name']), "{key}".format(
-                            key=key), "{value}".format(
-                            value=value))
+                            section_name=self.host_config[section][
+                                'name']),
+                        "{key}".format(key=key),
+                        "{value}".format(value=value))
             # monitoring-style config
             else:
                 if section in defaults_old_enabled:
@@ -291,23 +291,22 @@ class AgentConfig(object):
                             section_name=self.host_config[section]['name']))
                     for key, value in iteritems(self.host_config[section]):
                         if key in [
-                                'fielddrop',
-                                'fieldpass',
-                                'percpu',
-                                'devices',
-                                'interfaces']:
+                                'fielddrop', 'fieldpass', 'percpu',
+                                'devices', 'interfaces'
+                        ]:
                             config.set(
                                 "{section_name}".format(
-                                    section_name=self.host_config[section]['name']), "{key}".format(
-                                    key=key), "{value}".format(
-                                    value=value))
+                                    section_name=self.host_config[section][
+                                        'name']),
+                                "{key}".format(key=key),
+                                "{value}".format(value=value))

         # outputs
         config.add_section("[outputs.file]")
-        config.set("[outputs.file]",
-                   "files",
-                   "['{config}']".format(
-                       config=self.monitoring_data_output))
+        config.set(
+            "[outputs.file]",
+            "files",
+            "['{config}']".format(config=self.monitoring_data_output))
         config.set("[outputs.file]", "data_format", "'json'")

         with open(cfg_path, 'w') as fds:
@@ -6,7 +6,6 @@ logger = logging.getLogger(__name__)


class MetricsDecoder(object):
    def __init__(self):
        """
        translates telegraf metric names into common Monitoring metric names
@@ -49,32 +48,22 @@ class MetricsDecoder(object):
        self.diff_metrics = {
            'cpu': [],
            'mem': [],
            'net': ['packets_recv', 'packets_sent', 'bytes_recv', 'bytes_sent'],
            'nstat': ['TcpRetransSegs'],
            'net_response': [],
            'kernel': ['context_switches', 'interrupts', 'processes_forked'],
            'diskio': [
                'read_bytes', 'write_bytes', 'io_time', 'read_time', 'reads',
                'write_time', 'writes'
            ],
            'custom': []
        }

    def find_common_names(self, key):
        if key in self.known_metrics:
            return self.known_metrics[key]
        else:
            return 'custom:{}'.format(key)


decoder = MetricsDecoder()
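A minimal usage sketch of the module-level decoder singleton (the metric name is made up; the behavior follows the two code paths above):

decoder.find_common_names('some_unknown_key')   # -> 'custom:some_unknown_key'
'io_time' in decoder.diff_metrics['diskio']     # -> True, i.e. reported as a delta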
@@ -24,7 +24,6 @@ if sys.version_info[0] < 3:
else:
    from configparser import NoOptionError

logger = logging.getLogger(__name__)
@@ -54,8 +53,7 @@ class Plugin(AbstractPlugin):
        if self.monitoring:
            self.monitoring.load_start_time = time.time()
            logger.debug(
                "load_start_time = %s", self.monitoring.load_start_time)

    def get_available_options(self):
        return ["config", "default_target", "ssh_timeout"]
@@ -77,8 +75,9 @@ class Plugin(AbstractPlugin):
            is_monitoring = None

        if is_telegraf and is_monitoring:
            raise ValueError(
                'Both telegraf and monitoring configs specified. '
                'Clean up your config and delete one of them')
        if is_telegraf and not is_monitoring:
            return 'telegraf'
        if not is_telegraf and is_monitoring:
@@ -111,8 +110,7 @@ class Plugin(AbstractPlugin):
        self.detected_conf = self.__detect_configuration()
        if self.detected_conf:
            logging.info(
                'Detected monitoring configuration: %s', self.detected_conf)
            self.SECTION = self.detected_conf
        self.config = self.get_option("config", "auto").strip()
        self.default_target = self.get_option("default_target", "localhost")
@@ -133,16 +131,21 @@ class Plugin(AbstractPlugin):
        else:
            if self.config.lower() == "auto":
                self.die_on_fail = False
                with open(
                        resource.resource_filename(self.default_config),
                        'rb') as def_config:
                    config_contents = def_config.read()
            else:
                with open(resource.resource_filename(self.config),
                          'rb') as config:
                    config_contents = config.read()

            # dump config contents into a file
            xmlfile = self.core.mkstemp(".xml", "monitoring_")
            self.core.add_artifact_file(xmlfile)
            with open(
                    xmlfile, "wb"
            ) as f:  # output file should be in binary mode to support py3
                f.write(config_contents)
            self.config = xmlfile
@@ -169,8 +172,7 @@ class Plugin(AbstractPlugin):
            if info:
                self.default_target = info.address
                logger.debug(
                    "Changed monitoring target to %s", self.default_target)

        self.monitoring.config = self.config
        if self.default_target:
@@ -325,11 +327,11 @@ class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener):
            res = "Monitoring is " + screen.markup.GREEN + \
                "online" + screen.markup.RESET + ":\n"
            for hostname, metrics in self.data.items():
                tm_stamp = datetime.datetime.fromtimestamp(
                    float(self.time[hostname])).strftime('%H:%M:%S')
                res += (
                    "  " + screen.markup.CYAN + "%s" + screen.markup.RESET +
                    " at %s:\n") % (hostname, tm_stamp)
                for metric, value in sorted(metrics.iteritems()):
                    if self.sign[hostname][metric] > 0:
                        value = screen.markup.YELLOW + value + screen.markup.RESET
@@ -383,9 +385,10 @@ class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener):
            if self.metric not in data.keys() or not data[self.metric]:
                data[self.metric] = 0

            logger.debug(
                "Compare %s %s/%s=%s to %s",
                self.get_type_string(), host, self.metric, data[self.metric],
                self.value_limit)
            if self.comparison_fn(float(data[self.metric]), self.value_limit):
                if not self.seconds_count:
                    self.cause_second = self.last_second
@@ -430,8 +433,9 @@ class MetricHigherCriterion(AbstractMetricCriterion):
        return "%s/%s metric value is higher than %s for %s seconds" % items

    def widget_explain(self):
        items = (
            self.host, self.metric, self.value_limit, self.seconds_count,
            self.seconds_limit)
        return "%s/%s > %s for %s/%ss" % items, float(
            self.seconds_count) / self.seconds_limit
@@ -457,8 +461,9 @@ class MetricLowerCriterion(AbstractMetricCriterion):
        return "%s/%s metric value is lower than %s for %s seconds" % items

    def widget_explain(self):
        items = (
            self.host, self.metric, self.value_limit, self.seconds_count,
            self.seconds_limit)
        return "%s/%s < %s for %s/%ss" % items, float(
            self.seconds_count) / self.seconds_limit
@@ -7,12 +7,10 @@ import json
from ..Telegraf.decoder import decoder

logger = logging.getLogger(__name__)


class MonitoringReader(object):
    def __init__(self, source):
        self.buffer = []
        self.source = source
@@ -43,11 +41,12 @@ class MonitoringReader(object):
                            # key_group sample: diskio
                            # key_name sample: io_time
                            try:
                                key_group, key_name = key.split('_')[
                                    0].split('-')[0], '_'.join(
                                        key.split('_')[1:])
                            except:
                                key_group, key_name = key.split('_')[
                                    0], '_'.join(key.split('_')[1:])
                            if key_group in decoder.diff_metrics.keys():
                                if key_name in decoder.diff_metrics[
                                        key_group]:
@@ -60,7 +59,10 @@ class MonitoringReader(object):
                                    except KeyError:
                                        logger.debug(
                                            'There is no diff value for metric %s.\n'
                                            'Timestamp: %s. Is it initial data?',
                                            key,
                                            ts,
                                            exc_info=True)
                                        value = 0
                                    prepared_results[
                                        decoded_key] = value
@@ -69,8 +71,7 @@ class MonitoringReader(object):
                                        key)
                                    prepared_results[decoded_key] = value
                            else:
                                decoded_key = decoder.find_common_names(key)
                                prepared_results[decoded_key] = value
                    self.prev_check = jsn[ts]
                    collect.append((ts, prepared_results))
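The try/except split above is easiest to see on a concrete key; 'diskio-sda1_io_time' here is a hypothetical telegraf field name, not one taken from the source:

key = 'diskio-sda1_io_time'
key_group = key.split('_')[0].split('-')[0]   # 'diskio' (device suffix dropped)
key_name = '_'.join(key.split('_')[1:])       # 'io_time'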
@@ -78,8 +79,7 @@ class MonitoringReader(object):
                logger.error(
                    'Telegraf agent send trash to output: %s', chunk)
                logger.debug(
                    'Telegraf agent data block w/ trash: %s', exc_info=True)
                return []
            except:
                logger.error(
@@ -7,7 +7,6 @@ else:


class TestConfigManager(object):
    def test_rawxml_parse(self):
        """ raw xml read from string """
        manager = ConfigManager()
@@ -29,8 +28,7 @@ class TestConfigManager(object):
            'yandextank/plugins/Telegraf/tests/old_mon.xml', 'sometargethint')
        assert (
            configs[0]['host'] == 'somehost.yandex.tld' and
            configs[0]['host_config']['CPU']['name'] == '[inputs.cpu]')

    def test_xml_telegraf_parse(self):
        """ telegraf-style monitoring xml parse """
@@ -40,8 +38,7 @@ class TestConfigManager(object):
            'sometargethint')
        assert (
            configs[0]['host'] == 'somehost.yandex.tld' and
            configs[0]['host_config']['CPU']['name'] == '[inputs.cpu]')

    def test_target_hint(self):
        """ test target hint (special address=[target] option) """
@@ -53,7 +50,6 @@ class TestConfigManager(object):


class TestAgentConfig(object):
    def test_create_startup_configs(self):
        """ test agent config creates startup config """
        manager = ConfigManager()
@@ -81,9 +77,8 @@ class TestAgentConfig(object):
            cfg_parser.has_section('agent') and
            cfg_parser.get('agent', 'interval') == "'1s'" and
            cfg_parser.has_section('[outputs.file') and
            cfg_parser.get('[outputs.file', 'files') ==
            "['{rmt}/monitoring.rawdata']".format(rmt=remote_workdir))

    def test_create_custom_exec_script(self):
        """ test agent config creates custom_exec config """
@@ -3,14 +3,12 @@ from yandextank.plugins.Telegraf import Plugin as TelegrafPlugin


class TestTelegrafPlugin(object):
    def test_plugin_configuration(self):
        """ testing telegraf plugin configuration """
        core = TankCore()
        telegraf_plugin = TelegrafPlugin(core)
        core.set_option(
            'telegraf', 'config',
            'yandextank/plugins/Telegraf/tests/telegraf_mon.xml')
        telegraf_plugin.configure()
        assert telegraf_plugin.detected_conf == 'telegraf'
@@ -20,8 +18,7 @@ class TestTelegrafPlugin(object):
        core = TankCore()
        telegraf_plugin = TelegrafPlugin(core)
        core.set_option(
            'monitoring', 'config',
            'yandextank/plugins/Telegraf/tests/old_mon.xml')
        telegraf_plugin.configure()
        assert telegraf_plugin.detected_conf == 'monitoring'
@@ -31,12 +28,10 @@ class TestTelegrafPlugin(object):
        core = TankCore()
        telegraf_plugin = TelegrafPlugin(core)
        core.set_option(
            'monitoring', 'config',
            'yandextank/plugins/Telegraf/tests/old_mon.xml')
        core.set_option(
            'telegraf', 'config',
            'yandextank/plugins/Telegraf/tests/telegraf_mon.xml')
        try:
            telegraf_plugin.configure()
@@ -1 +1 @@
from plugin import *  # noqa:F401,F403
@@ -19,8 +19,10 @@ class Plugin(AbstractPlugin, AbstractInfoWidget):
    def __init__(self, core):
        AbstractPlugin.__init__(self, core)
        AbstractInfoWidget.__init__(self)
        self.lines = [
            l.decode('utf-8')
            for l in resource_stream(__name__, "config/tips.txt").readlines()
        ]
        self.disable = 0

        line = random.choice(self.lines)
@@ -60,6 +62,6 @@ class Plugin(AbstractPlugin, AbstractInfoWidget):
            self.probability += 1e-3

        line = screen.markup.WHITE + "Tips & Tricks => " + \
            self.section + screen.markup.RESET + ":\n "
        line += "\n ".join(
            textwrap.wrap(self.tip, screen.right_panel_width - 2))
        return line
@@ -11,21 +11,21 @@ from .module_exceptions import StepperConfigurationError, AmmoFileError


class ComponentFactory():
    def __init__(
            self,
            rps_schedule=None,
            http_ver='1.1',
            ammo_file=None,
            instances_schedule=None,
            instances=1000,
            loop_limit=-1,
            ammo_limit=-1,
            uris=None,
            headers=None,
            autocases=None,
            enum_ammo=False,
            ammo_type='phantom',
            chosen_cases=[], ):
        self.log = logging.getLogger(__name__)
        self.ammo_file = ammo_file
        self.ammo_type = ammo_type
@@ -57,7 +57,8 @@ class ComponentFactory():
        """
        if self.rps_schedule and self.instances_schedule:
            raise StepperConfigurationError(
                'Both rps and instances schedules specified. You must specify only one of them'
            )
        elif self.rps_schedule:
            info.status.publish('loadscheme', self.rps_schedule)
            return lp.create(self.rps_schedule)
@@ -84,11 +85,11 @@ class ComponentFactory():
        }
        if self.uris and self.ammo_file:
            raise StepperConfigurationError(
                'Both uris and ammo file specified. You must specify only one of them'
            )
        elif self.uris:
            ammo_gen = missile.UriStyleGenerator(
                self.uris, self.headers, http_ver=self.http_ver)
        elif self.ammo_file:
            if self.ammo_type in af_readers:
                if self.ammo_type == 'phantom':
@@ -98,10 +99,12 @@ class ComponentFactory():
                        if not ammo.next()[0].isdigit():
                            self.ammo_type = 'uri'
                            self.log.info(
                                "Setting ammo_type 'uri' because ammo is not started with digit and you did not specify ammo format"
                            )
                        else:
                            self.log.info(
                                "Default ammo type ('phantom') used, use 'phantom.ammo_type' option to override it"
                            )
                    except StopIteration:
                        self.log.exception(
                            "Couldn't read first line of ammo file")
@@ -110,9 +113,8 @@ class ComponentFactory():
            else:
                raise NotImplementedError(
                    'No such ammo type implemented: "%s"' % self.ammo_type)
            ammo_gen = af_readers[self.ammo_type](
                self.ammo_file, headers=self.headers, http_ver=self.http_ver)
        else:
            raise StepperConfigurationError(
                'Ammo not found. Specify uris or ammo file')
@@ -15,8 +15,9 @@ class Stpd(object):
        self.af = ammo_factory

    def __iter__(self):
        return (
            "%s %s %s\n%s\n" % (len(missile), timestamp, marker, missile)
            for timestamp, marker, missile in self.af)


class StpdReader(object):
@@ -36,6 +37,7 @@ class StpdReader(object):
                return line  # EOF
            chunk_header = line.strip('\r\n')
            return chunk_header

        with open(self.filename, 'rb') as ammo_file:
            chunk_header = read_chunk_header(ammo_file)
            while chunk_header != '':
@@ -47,12 +49,12 @@ class StpdReader(object):
                    missile = ammo_file.read(chunk_size)
                    if len(missile) < chunk_size:
                        raise StpdFileError(
                            "Unexpected end of file: read %s bytes instead of %s"
                            % (len(missile), chunk_size))
                    yield (timestamp, missile, marker)
                except (IndexError, ValueError) as e:
                    raise StpdFileError(
                        "Error while reading ammo file. Position: %s, header: '%s', original exception: %s"
                        % (ammo_file.tell(), chunk_header, e))
                chunk_header = read_chunk_header(ammo_file)
        self.log.info("Reached the end of stpd file")
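As a reference for the reader above, a sketch of the stpd chunk layout it expects — a header line of size, timestamp and marker, followed by exactly that many bytes of request; the payload below is invented:

# 36 0 index_page        <- chunk header: <size> <timestamp> <marker>
# GET / HTTP/1.1
# Host: example.org
#                        <- <size> bytes of missile follow the header line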
@@ -38,8 +38,8 @@ class StepperStatus(object):
    def publish(self, key, value):
        if key not in self.info:
            raise RuntimeError(
                "Tried to publish to a non-existent key: %s" % key)
        log.debug('Published %s to %s', value, key)
        self.info[key] = value
@@ -88,8 +88,8 @@ class StepperStatus(object):
        self.info['loop_count'] = self._loop_count
        for key in self.info:
            if self.info[key] is None:
                raise RuntimeError(
                    "Information for %s is not published yet." % key)
        return StepperInfo(**self.info)

    def update_view(self):
@@ -100,15 +100,16 @@ class StepperStatus(object):
            self._timer = cur_time
            if time_delta > 0:
                stdout.write(
                    "AF: %3s%%, LP: %3s%%, loops: %10s, speed: %5s Krps\r" % (
                        self.af_progress, self.lp_progress, self.loop_count,
                        int(ammo_generated / time_delta / 1000.0)))
                stdout.flush()
                if self.core:
                    self.core.publish("stepper", "progress", self.lp_progress)
                    self.core.publish("stepper", "loop_count", self.loop_count)
                    self.core.publish(
                        "stepper", "speed",
                        "%s Krps" % int(ammo_generated / time_delta / 1000.0))

    def update_af_progress(self):
        if self.af_size and self.loop_limit and self.af_position is not None:
@@ -9,7 +9,6 @@ from builtins import range


class LoadPlanBuilder(object):
    def __init__(self):
        self.generators = []
        self.steps = []
@@ -33,15 +32,16 @@ class LoadPlanBuilder(object):
        return self

    def ramp(self, count, duration):
        self.log.debug(
            "Ramp %s instances in %sms from %sms" %
            (count, duration, self.duration))
        if count < 0:
            raise StepperConfigurationError(
                "Can not stop instances in instances_schedule.")
        interval = float(duration) / (count - 1)
        start_time = self.duration
        self.generators.append(
            int(start_time + i * interval) for i in range(0, count))
        self.steps += [(self.instances + i + 1, int(interval / 1000.0))
                       for i in range(0, count)]
        self.instances += count
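A quick check of the arithmetic, matching the first parametrized case in the load-plan tests further down (count=5, duration=4000 ms, so interval = 4000 / 4 = 1000 ms; take() as imported in those tests):

lp = LoadPlanBuilder().ramp(5, 4000)
take(7, lp.create())   # [0, 1000, 2000, 3000, 4000, 0, 0], per the tests below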
@@ -58,8 +58,8 @@ class LoadPlanBuilder(object):
        self.ramp(final_instances - initial_instances + 1, duration)
        return self

    def stairway(
            self, initial_instances, final_instances, step_size, step_duration):
        step_count = (final_instances - initial_instances) // step_size
        self.log.debug("Making a stairway: %s steps" % step_count)
        self.start(initial_instances - self.instances)
@@ -79,7 +79,8 @@ class LoadPlanBuilder(object):
            self.ramp(int(instances), parse_duration(interval))
        else:
            self.log.info(
                "Ramp step format: 'ramp(<instances_to_start>, <step_duration>)'"
            )
            raise StepperConfigurationError(
                "Error in step configuration: 'ramp(%s'" % params)
@@ -91,7 +92,8 @@ class LoadPlanBuilder(object):
            self.const(int(instances), parse_duration(interval))
        else:
            self.log.info(
                "Const step format: 'const(<instances_count>, <step_duration>)'"
            )
            raise StepperConfigurationError(
                "Error in step configuration: 'const(%s'" % params)
@@ -112,11 +114,12 @@ class LoadPlanBuilder(object):
        if s_res:
            initial_instances, final_instances, interval = s_res.groups()
            self.line(
                int(initial_instances),
                int(final_instances), parse_duration(interval))
        else:
            self.log.info(
                "Line step format: 'line(<initial_instances>, <final_instances>, <step_duration>)'"
            )
            raise StepperConfigurationError(
                "Error in step configuration: 'line(%s'" % params)
@@ -139,11 +142,13 @@ class LoadPlanBuilder(object):
            initial_instances, final_instances, step_size, step_duration = s_res.groups(
            )
            self.stairway(
                int(initial_instances),
                int(final_instances),
                int(step_size), parse_duration(step_duration))
        else:
            self.log.info(
                "Stairway step format: 'step(<initial_instances>, <final_instances>, <step_size>, <step_duration>)'"
            )
            raise StepperConfigurationError(
                "Error in step configuration: 'step(%s'" % params)
@@ -21,8 +21,9 @@ class Const(object):
        if self.rps == 0:
            return iter([])
        interval = 1000.0 / self.rps
        return (
            int(i * interval)
            for i in range(0, int(self.rps * self.duration / 1000)))

    def rps_at(self, t):
        '''Return rps for second t'''
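A sanity check of the generator above, following directly from interval = 1000 / rps:

const = Const(5, 2000)   # 5 rps for 2000 ms -> 10 shots, 200 ms apart
list(const)              # [0, 200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800]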
@@ -112,8 +113,8 @@ class Line(object):
        :rtype: list
        """
        seconds = range(0, int(self.duration) + 1)
        rps_groups = groupby([proper_round(self.rps_at(t)) for t in seconds],
                             lambda x: x)
        rps_list = [(rps, len(list(rpl))) for rps, rpl in rps_groups]
        return rps_list
@@ -140,12 +141,11 @@ class Composite(object):
        return int(sum(step.__len__() for step in self.steps))

    def get_rps_list(self):
        return list(
            chain.from_iterable(step.get_rps_list() for step in self.steps))


class Stairway(Composite):
    def __init__(self, minrps, maxrps, increment, step_duration):
        if maxrps < minrps:
            increment = -increment
@@ -164,7 +164,6 @@ class Stairway(Composite):


class StepFactory(object):
    @staticmethod
    def line(params):
        template = re.compile('([0-9.]+),\s*([0-9.]+),\s*([0-9.]+[dhms]?)+\)')
@@ -183,8 +182,8 @@ class StepFactory(object):
            '([0-9.]+),\s*([0-9.]+),\s*([0-9.]+),\s*([0-9.]+[dhms]?)+\)')
        minrps, maxrps, increment, duration = template.search(params).groups()
        return Stairway(
            float(minrps),
            float(maxrps), float(increment), parse_duration(duration))

    @staticmethod
    def produce(step_config):
@@ -198,8 +197,8 @@ class StepFactory(object):
        if load_type in _plans:
            return _plans[load_type](params)
        else:
            raise NotImplementedError(
                'No such load type implemented: "%s"' % load_type)


def create(rps_schedule):
@@ -207,8 +206,8 @@ def create(rps_schedule):
    Create Load Plan as defined in schedule. Publish info about its duration.
    """
    if len(rps_schedule) > 1:
        lp = Composite(
            [StepFactory.produce(step_config) for step_config in rps_schedule])
    else:
        lp = StepFactory.produce(rps_schedule[0])
    info.status.publish('duration', lp.get_duration() / 1000)
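Usage mirrors the tests at the bottom of this diff — one schedule string yields a single step, several get wrapped in a Composite:

lp = create(['const(1, 10s)'])
take(10, lp)   # [0, 1000, 2000, ..., 9000], as asserted in the tests below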
@@ -39,18 +39,18 @@ class AmmoFactory(object):
        configured ComponentFactory, passed as a parameter to the
        __init__ method of this class.
        '''
        ammo_stream = (
            ammo
            for ammo in ((missile, marker or self.marker(missile))
                         for missile, marker in self.ammo_generator)
            if self.filter(ammo))

        return ((timestamp, marker or self.marker(missile), missile)
                for timestamp, (missile, marker
                                ) in zip(self.load_plan, ammo_stream))


class Stepper(object):
    def __init__(self, core, **kwargs):
        info.status = info.StepperStatus()
        info.status.core = core
@@ -118,19 +118,25 @@ class StepperWrapper(object):
    def get_option(self, option_ammofile, param2=None):
        ''' get_option wrapper'''
        result = self.core.get_option(self.section, option_ammofile, param2)
        self.log.debug(
            "Option %s.%s = %s", self.section, option_ammofile, result)
        return result

    @staticmethod
    def get_available_options():
        opts = [
            StepperWrapper.OPTION_AMMOFILE, StepperWrapper.OPTION_LOOP,
            StepperWrapper.OPTION_SCHEDULE, StepperWrapper.OPTION_STPD,
            StepperWrapper.OPTION_INSTANCES_LIMIT
        ]
        opts += [
            "instances_schedule", "uris", "headers", "header_http", "autocases",
            "enum_ammo", "ammo_type", "ammo_limit"
        ]
        opts += [
            "use_caching", "cache_dir", "force_stepping", "file_cache",
            "chosen_cases"
        ]
        return opts

    def read_config(self):
@@ -150,12 +156,12 @@ class StepperWrapper(object):
                steps.append(step.strip() + ')')
            return steps

        self.rps_schedule = make_steps(
            self.get_option(self.OPTION_SCHEDULE, ''))
        self.instances_schedule = make_steps(
            self.get_option("instances_schedule", ''))
        self.instances = int(
            self.get_option(self.OPTION_INSTANCES_LIMIT, '1000'))
        self.uris = self.get_option("uris", '').strip().split("\n")
        while '' in self.uris:
            self.uris.remove('')
@@ -167,8 +173,8 @@ class StepperWrapper(object):
        self.use_caching = int(self.get_option("use_caching", '1'))

        self.file_cache = int(self.get_option('file_cache', '8192'))
        cache_dir = self.core.get_option(
            self.section, "cache_dir", self.core.artifacts_base_dir)
        self.cache_dir = os.path.expanduser(cache_dir)
        self.force_stepping = int(self.get_option("force_stepping", '0'))
        self.stpd = self.get_option(self.OPTION_STPD, "")
@@ -209,7 +215,8 @@ class StepperWrapper(object):
                instances=self.instances)
            publish_info(stepper_info)
        else:
            if (
                    self.force_stepping and
                    os.path.exists(self.__si_filename())):
                os.remove(self.__si_filename())
            self.__make_stpd_file()
@@ -240,7 +247,8 @@ class StepperWrapper(object):
        hashed_str += sep + str(self.ammo_limit) + sep + ';'.join(
            self.rps_schedule) + sep + str(self.autocases)
        hashed_str += sep + ";".join(self.uris) + sep + ";".join(
            self.headers) + sep + self.http_ver + sep + ";".join(
                self.chosen_cases)
        hashed_str += sep + str(self.enum_ammo) + sep + str(self.ammo_type)
        if self.instances_schedule:
            hashed_str += sep + str(self.instances)
@@ -12,8 +12,8 @@ param1=50&param2=0&param3=hello
def __mark_by_uri(missile):
    return '_'.join(
        missile.split('\n', 1)[0].split(' ', 2)[1].split('?')[0].split('/'))


class __UriMarker(object):
@@ -29,15 +29,18 @@ class __UriMarker(object):
        self.limit = limit

    def __call__(self, missile):
        return '_'.join(
            missile.split('\n', 1)[0].split(' ', 2)[1].split('?')[0].split('/')[
                0:self.limit + 1])


__markers = {
    'uniq': lambda m: uuid4().hex,
    'uri': __mark_by_uri,
}
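Concretely, for an invented missile the uri marker drops the query string and joins the path segments with underscores:

missile = "GET /search/books?q=tank HTTP/1.1\r\nHost: example.org"
__mark_by_uri(missile)   # -> '_search_books'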
class __Enumerator(object):
    def __init__(self, marker):
        self.marker = marker
        self.number = int(0)
@@ -80,6 +83,7 @@ def get_marker(marker_type, enum_ammo=False):
            if limit:
                marker = __UriMarker(limit)
            else:

                def marker(m):
                    return ''
        except ValueError:
@@ -46,8 +46,8 @@ class HttpAmmo(object):
            headers = '\r\n'.join(self.headers) + '\r\n'
        else:
            headers = ''
        return "%s %s %s\r\n%s\r\n%s" % (
            self.method, self.uri, self.proto, headers, self.body)
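The rendered request for a minimal made-up ammo, assuming the constructor defaults (GET, HTTP/1.1, empty body) — the exact signature sits outside this hunk:

HttpAmmo('/', headers=['Host: example.org']).to_s()
# 'GET / HTTP/1.1\r\nHost: example.org\r\n\r\n'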
class SimpleGenerator(object):
@@ -78,10 +78,9 @@ class UriStyleGenerator(object):
        uris - a list of URIs as strings.
        '''
        self.uri_count = len(uris)
        self.missiles = cycle([(
            HttpAmmo(
                uri, headers, http_ver=http_ver).to_s(), None) for uri in uris])

    def __iter__(self):
        for m in self.missiles:
@@ -120,8 +119,8 @@ class AmmoFileReader(object):
                    if chunk_size == 0:
                        if info.status.loop_count == 0:
                            self.log.info(
                                'Zero-sized chunk in ammo file at %s. Starting over.'
                                % ammo_file.tell())
                        ammo_file.seek(0)
                        info.status.inc_loop_count()
                        chunk_header = read_chunk_header(ammo_file)
@@ -130,13 +129,13 @@ class AmmoFileReader(object):
                    missile = ammo_file.read(chunk_size)
                    if len(missile) < chunk_size:
                        raise AmmoFileError(
                            "Unexpected end of file: read %s bytes instead of %s"
                            % (len(missile), chunk_size))
                    yield (missile, marker)
                except (IndexError, ValueError) as e:
                    raise AmmoFileError(
                        "Error while reading ammo file. Position: %s, header: '%s', original exception: %s"
                        % (ammo_file.tell(), chunk_header, e))
                chunk_header = read_chunk_header(ammo_file)
                if chunk_header == '':
                    ammo_file.seek(0)
@@ -242,12 +241,14 @@ class AccessLogReader(object):
                        method, uri, proto = request.split()
                        http_ver = proto.split('/')[1]
                        if method == "GET":
                            yield (
                                HttpAmmo(
                                    uri,
                                    headers=self.headers,
                                    http_ver=http_ver, ).to_s(), None)
                        else:
                            self.warn(
                                "Skipped line: %s (unsupported method)" % line)
                    except (ValueError, IndexError) as e:
                        self.warn("Skipped line: %s (%s)" % (line, e))
                ammo_file.seek(0)
@@ -260,7 +261,6 @@ def _parse_header(header):


class UriReader(object):
    def __init__(self, filename, headers=[], http_ver='1.1', **kwargs):
        self.filename = filename
        self.headers = {}
@@ -278,8 +278,8 @@ class UriReader(object):
            for line in ammo_file:
                info.status.af_position = ammo_file.tell()
                if line.startswith('['):
                    self.headers.update(
                        _parse_header(line.strip('\r\n[]\t ')))
                elif len(line.rstrip('\r\n')):
                    fields = line.split()
                    uri = fields[0]
@@ -287,13 +287,14 @@ class UriReader(object):
                        marker = fields[1]
                    else:
                        marker = None
                    yield (
                        HttpAmmo(
                            uri,
                            headers=[
                                ': '.join(header)
                                for header in self.headers.items()
                            ],
                            http_ver=self.http_ver, ).to_s(), marker)
        if info.status.ammo_count == 0:
            self.log.error("No ammo in uri-style file")
            raise AmmoFileError("No ammo! Cover me!")
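Reconstructed from the branches above, the uri-style ammo format looks like this — bracketed lines update the shared headers, other lines are a uri plus an optional marker; the contents are invented:

# [Host: example.org]        <- header line, merged into self.headers
# /index.html index_page     <- uri plus an explicit marker
# /search                    <- uri only, marker stays None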
@@ -339,8 +340,8 @@ class UriPostReader(object):
                    chunk_size = int(fields[0])
                    if chunk_size == 0:
                        self.log.debug(
                            'Zero-sized chunk in ammo file at %s. Starting over.'
                            % ammo_file.tell())
                        ammo_file.seek(0)
                        info.status.inc_loop_count()
                        chunk_header = read_chunk_header(ammo_file)
@@ -350,21 +351,22 @@ class UriPostReader(object):
                    missile = ammo_file.read(chunk_size)
                    if len(missile) < chunk_size:
                        raise AmmoFileError(
                            "Unexpected end of file: read %s bytes instead of %s"
                            % (len(missile), chunk_size))
                    yield (
                        HttpAmmo(
                            uri=uri,
                            headers=[
                                ': '.join(header)
                                for header in self.headers.items()
                            ],
                            method='POST',
                            body=missile,
                            http_ver=self.http_ver, ).to_s(), marker)
                except (IndexError, ValueError) as e:
                    raise AmmoFileError(
                        "Error while reading ammo file. Position: %s, header: '%s', original exception: %s"
                        % (ammo_file.tell(), chunk_header, e))
                chunk_header = read_chunk_header(ammo_file)
                if chunk_header == '':
                    self.log.debug(
@@ -5,25 +5,48 @@ from yandextank.stepper.util import take


class TestCreate(object):
    @pytest.mark.parametrize(
        'n, loadplan, expected',
        [(
            7, LoadPlanBuilder().ramp(5, 4000).create(),
            [0, 1000, 2000, 3000, 4000, 0, 0]
        ), (
            7, create(['ramp(5, 4s)']),
            [0, 1000, 2000, 3000, 4000, 0, 0]
        ), (
            12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)']),
            [0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0]
        ), (
            7, create(['wait(5s)', 'ramp(5, 0)']),
            [5000, 5000, 5000, 5000, 5000, 0, 0]
        ), (
            7, create([]),
            [0, 0, 0, 0, 0, 0, 0]
        ), (
            12, create(['line(1, 9, 4s)']),
            [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0]
        ), (
            12, create(['const(3, 5s)', 'line(7, 11, 2s)']),
            [0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0]
        ), (
            12, create(['step(2, 10, 2, 3s)']),
            [0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0]
        ), (
            12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps,
            [(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)]
        ), (
            12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps,
            [
                (100, 30), (200, 30), (300, 30), (400, 30), (500, 30),
                (600, 30), (700, 30), (800, 30), (900, 30), (950, 30)]
        )])  # yapf:disable
    def test_steps(self, n, loadplan, expected):
        assert take(n, loadplan) == expected

    @pytest.mark.parametrize(
        'loadplan, expected',
        [(LoadPlanBuilder().stairway(100, 950, 100, 30000), 950),
         (LoadPlanBuilder().const(3, 1000).line(5, 10, 5000), 10),
         (LoadPlanBuilder().line(1, 100, 60000), 100)])
    def test_instances(self, loadplan, expected):
        assert loadplan.instances == expected
@@ -4,7 +4,6 @@ from yandextank.stepper.util import take


class TestLine(object):
    def test_get_rps_list(self):
        lp = create(["line(1, 100, 10s)"])
        rps_list = lp.get_rps_list()
@@ -12,23 +11,19 @@ class TestLine(object):
        assert rps_list[-1][0] == 100


@pytest.mark.parametrize(
    "rps, duration, rps_list",
    [(100, 3000, [(100, 3)]), (0, 3000, [(0, 3)]), (100, 0, [(100, 0)])])
class TestConst(object):
    @pytest.mark.parametrize(
        "check_point, expected",
        [(lambda duration: 0, lambda rps: rps),
         (lambda duration: duration / 2, lambda rps: rps),
         (lambda duration: duration + 1, lambda rps: 0),
         (lambda duration: -1, lambda rps: 0)])
    def test_rps_at(self, rps, duration, rps_list, check_point, expected):
        assert Const(rps,
                     duration).rps_at(check_point(duration)) == expected(rps)

    def test_get_rps_list(self, rps, duration, rps_list):
        assert Const(rps, duration).get_rps_list() == rps_list
@@ -36,132 +31,118 @@ class TestConst(object):


class TestLineNew(object):
    @pytest.mark.parametrize(
        "min_rps, max_rps, duration, check_point, expected",
        [(0, 10, 30 * 1000, 0, 0), (0, 10, 30 * 1000, 10, 3),
         (0, 10, 30 * 1000, 29, 10), (9, 10, 30 * 1000, 1, 9),
         (9, 10, 30 * 1000, 20, 10)])
    def test_rps_at(self, min_rps, max_rps, duration, check_point, expected):
        assert round(Line(min_rps, max_rps, duration).rps_at(
            check_point)) == expected

    @pytest.mark.parametrize(
        "min_rps, max_rps, duration, check_point, expected",
        [
            (0, 10, 20 * 1000, 9, (9, 2)),
            (0, 10, 30 * 1000, 0, (0, 2)),
            (0, 10, 30 * 1000, 5, (5, 3)),
            (0, 10, 30 * 1000, 10, (10, 2)),
            (0, 10, 3 * 1000, 0, (0, 1)),
            (0, 10, 3 * 1000, 1, (3, 1)),
            (0, 10, 3 * 1000, 2, (7, 1)),
            (0, 10, 3 * 1000, 3, (10, 1)),
            (9, 10, 30 * 1000, 0, (9, 15)),
            (9, 10, 30 * 1000, 1, (10, 16)),
            (10, 10, 30 * 1000, 0, (10, 31)),  # strange
            (10, 0, 30 * 1000, 0, (10, 2)),
            (10, 0, 30 * 1000, 1, (9, 3)),
            (10, 0, 30 * 1000, 9, (1, 3)),
            (10, 0, 30 * 1000, 10, (0, 2)),
        ])
    def test_get_rps_list(
            self, min_rps, max_rps, duration, check_point, expected):
        assert Line(min_rps, max_rps,
                    duration).get_rps_list()[check_point] == expected

    @pytest.mark.parametrize(
        "min_rps, max_rps, duration, expected_len, threshold, len_above_threshold",
        [
            (2, 12, 25000, 175, 5000, 160),
            (2, 12, 25000, 175, 10000, 135),
            (2, 12, 25000, 175, 15000, 100),
            (2, 12, 25000, 175, 20000, 55),
            (0, 10, 25000, 125, 15000, 80),
            (10, 12, 20000, 220, 10000, 115),
            (10, 10, 20000, 200, 10000, 100),
            (10, 0, 25000, 125, 10000, 45),
            (10, 0, 25000, 125, 15000, 20),
        ])
    def test_iter(
            self, min_rps, max_rps, duration, expected_len, threshold,
            len_above_threshold):
        load_plan = Line(min_rps, max_rps, duration)
        assert len(load_plan) == expected_len
        assert len(
            [ts for ts in load_plan if ts >= threshold]) == len_above_threshold


class TestComposite(object):
    @pytest.mark.parametrize(
        "steps, expected_len", [([Line(0, 10, 20000), Const(10, 10000)], 200),
                                ([Line(0, 10, 20000), Line(10, 0, 20000)], 200),
                                ([Const(5, 10000), Const(10, 5000)], 100)])
    def test_iter(self, steps, expected_len):
        assert len(Composite(steps)) == expected_len

    @pytest.mark.parametrize(
        "steps, check_point, expected", [
            ([Line(0, 10, 20000), Const(10, 10000)], 9, (9, 2)),
            ([Line(0, 10, 20000), Const(10, 10000)], 10, (10, 2)),
            ([Line(0, 10, 20000), Const(10, 10000)], 11, (10, 10)),
        ])
    def test_rps_list(self, steps, check_point, expected):
        assert Composite(steps).get_rps_list()[check_point] == expected


class TestStairway(object):
    @pytest.mark.parametrize(
        "min_rps, max_rps, increment, step_duration, expected_len, threshold, len_above_threshold",
        [(0, 1000, 50, 3000, 31500, 9000, 31050),
         (0, 1000, 50, 3000, 31500, 15000, 30000),
         (0, 1000, 50, 3000, 31500, 45000, 15750)])
    def test_iter(
            self, min_rps, max_rps, increment, step_duration, expected_len,
            threshold, len_above_threshold):
        load_plan = Stairway(min_rps, max_rps, increment, step_duration)
        assert len(load_plan) == expected_len
        assert len(
            [ts for ts in load_plan if ts >= threshold]) == len_above_threshold


class TestCreate(object):
    @pytest.mark.parametrize(
        'rps_schedule, check_point, expected', [
            (['line(1, 5, 2s)'], 100, [0, 618, 1000, 1302, 1561, 1791]),
            (['line(1.1, 5.8, 2s)'], 100, [0, 566, 917, 1196, 1435, 1647]),
            (['line(5, 1, 2s)'], 100, [0, 208, 438, 697, 1000, 1381]),
            (['const(1, 10s)'], 100,
             [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]),
            (['const(200, 0.1s)'], 100, [
                0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75,
                80, 85, 90, 95
            ]),
            (['const(1, 2s)', 'const(2, 2s)'], 100,
             [0, 1000, 2000, 2500, 3000, 3500]),
            (['const(1.5, 10s)'], 100, [
                0, 666, 1333, 2000, 2666, 3333, 4000, 4666, 5333, 6000, 6666,
                7333, 8000, 8666, 9333
            ]),
            (['step(1, 5, 1, 5s)'], 10,
             [0, 1000, 2000, 3000, 4000, 5000, 5500, 6000, 6500, 7000]),
            (['step(1.2, 5.7, 1.1, 5s)'], 10,
             [0, 833, 1666, 2500, 3333, 4166, 5000, 5434, 5869, 6304]),
            (['const(1, 1)'], 10, [0]),
        ])
    def test_create(self, rps_schedule, check_point, expected):
        # pytest.set_trace()
        assert take(check_point, (create(rps_schedule))) == expected
@@ -31,7 +31,12 @@ def parse_duration(duration):
    _re_token = re.compile("([0-9.]+)([dhms]?)")

    def parse_token(time, multiplier):
        multipliers = {
            'd': 86400,
            'h': 3600,
            'm': 60,
            's': 1,
        }
        if multiplier:
            if multiplier in multipliers:
                return int(float(time) * multipliers[multiplier] * 1000)
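So a single matched token converts to milliseconds like this (only the suffixed branch is shown above; the no-suffix fallback lies outside this hunk):

parse_token('30', 's')   # 30 * 1 * 1000 = 30000
parse_token('2', 'm')    # 2 * 60 * 1000 = 120000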