mirror of https://github.com/valitydev/yandex-tank.git, synced 2024-11-06 10:25:17 +00:00
Remove unnecessary select
Add dotted option support to default dir
This commit is contained in:
parent 364a72ecd6
commit 097ed55fc8
@@ -166,17 +166,7 @@ class ConsoleTank:
     def __override_config_from_cmdline(self):
         # override config options from command line
         if self.options.option:
-            for option_str in self.options.option:
-                try:
-                    section = option_str[:option_str.index('.')]
-                    option = option_str[option_str.index('.') + 1:option_str.index('=')]
-                except ValueError:
-                    section = self.MIGRATE_SECTION
-                    option = option_str[:option_str.index('=')]
-                value = option_str[option_str.index('=') + 1:]
-                self.log.debug("Override option: %s => [%s] %s=%s", option_str, section, option, value)
-                self.core.set_option(section, option, value)
-
+            self.core.apply_shorthand_options(self.options.option, self.MIGRATE_SECTION)

     def __there_is_locks(self):
         retcode = False
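For context, the shorthand strings forwarded here use the form section.option=value; a bare option=value falls back to the caller-supplied section (the migration section on this path). A minimal standalone sketch of that parsing rule, with an illustrative function name that is not part of the codebase:

def parse_shorthand(option_str, default_section):
    try:
        # explicit section is everything before the first dot
        section = option_str[:option_str.index('.')]
        option = option_str[option_str.index('.') + 1:option_str.index('=')]
    except ValueError:
        # no dot found: route the option to the fallback section
        section = default_section
        option = option_str[:option_str.index('=')]
    value = option_str[option_str.index('=') + 1:]
    return section, option, value

print(parse_shorthand('tank.someoption=somevalue', 'DEFAULT'))  # ('tank', 'someoption', 'somevalue')
print(parse_shorthand('someoption=somevalue', 'DEFAULT'))       # ('DEFAULT', 'someoption', 'somevalue')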
Tank/Core.py (42 changed lines)
@@ -12,8 +12,6 @@ import time
 import traceback
 import datetime

-# TODO: 3 add ability to set options in "section.option" style in DEFAULT section
-
 class TankCore:
     """
     JMeter + dstat inspired :)
@@ -38,6 +36,11 @@ class TankCore:
         '''
         self.log.info("Loading configs...")
         self.config.load_files(configs)
+        dotted_options = []
+        for option, value in self.config.get_options(self.SECTION):
+            if '.' in option:
+                dotted_options += [option + '=' + value]
+        self.apply_shorthand_options(dotted_options, self.SECTION)
         self.config.flush()
         self.add_artifact_file(self.config.file)

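This new block in load_configs is what implements the dotted-option feature: any option of the tool's own section whose name contains a dot is re-applied as a real section/option pair. Because ConfigParser mirrors [DEFAULT] entries into every section, a dotted.test=passed line under [DEFAULT] surfaces while iterating the tank section and ends up as [dotted] test=passed. A rough sketch of that mechanism, shown with Python 3's configparser for runnability (the codebase itself is Python 2 era):

from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[DEFAULT]
dotted.test=passed

[tank]
someoption=somevalue
""")

# [DEFAULT] entries are visible in every section, so the dotted key
# shows up while iterating the tool's own section:
dotted = [opt + '=' + val for opt, val in config.items('tank') if '.' in opt]
print(dotted)  # ['dotted.test=passed'] -> applied back as [dotted] test=passed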
@@ -160,20 +163,23 @@ class TankCore:
         return retcode

+    def __collect_artifacts(self):
+        if not self.artifacts_dir:
+            date_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S.")
+            self.artifacts_dir = tempfile.mkdtemp("", date_str, self.artifacts_base_dir)
+        elif not os.path.isdir(self.artifacts_dir):
+            os.makedirs(self.artifacts_dir)
+        self.log.info("Artifacts dir: %s", self.artifacts_dir)
+        for filename, keep in self.artifact_files.items():
+            self.__collect_file(filename, keep)
+
     def plugins_post_process(self, retcode):
         '''
         Call post_process() on all plugins
         '''
         self.log.info("Post-processing test...")

-        if not self.artifacts_dir:
-            date_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S.")
-            self.artifacts_dir = tempfile.mkdtemp("", date_str, self.artifacts_base_dir)
-        else:
-            if not os.path.isdir(self.artifacts_dir):
-                os.makedirs(self.artifacts_dir)
-        self.log.info("Artifacts dir: %s", self.artifacts_dir)
-
         for plugin_key in self.plugins_order:
             plugin = self.__get_plugin_by_key(plugin_key)
             self.log.debug("Post-process %s", plugin)
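The extracted __collect_artifacts helper keeps the existing directory convention: a timestamp prefix plus mkdtemp's random suffix, created under the configured base directory. A quick illustration of the resulting name, using the system temp dir as a stand-in for artifacts_base_dir:

import datetime
import tempfile

date_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S.")
# mkdtemp(suffix, prefix, dir): empty suffix, timestamp prefix, so the
# result looks like /tmp/2012-09-06_15-04-05.a1b2c3 (random tail varies)
artifacts_dir = tempfile.mkdtemp("", date_str, tempfile.gettempdir())
print(artifacts_dir)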
@@ -186,8 +192,7 @@ class TankCore:
         if not retcode:
             retcode = 1

-        for (filename, keep) in self.artifact_files.items():
-            self.__collect_file(filename, keep)
+        self.__collect_artifacts()

         return retcode

@@ -277,6 +282,19 @@ class TankCore:
         '''
         if filename:
             self.artifact_files[filename] = keep_original

+    def apply_shorthand_options(self, options, default_section='DEFAULT'):
+        for option_str in options:
+            try:
+                section = option_str[:option_str.index('.')]
+                option = option_str[option_str.index('.') + 1:option_str.index('=')]
+            except ValueError:
+                section = default_section
+                option = option_str[:option_str.index('=')]
+            value = option_str[option_str.index('=') + 1:]
+            self.log.debug("Override option: %s => [%s] %s=%s", option_str, section, option, value)
+            self.set_option(section, option, value)
+
+
 class ConfigManager:
@@ -385,7 +385,6 @@ class MonitoringCollector:

         return [config, filter_obj]

-    # FIXME: 3 simplify this filtering hell with built-in filter()
     def filtering(self, mask, filter_list):
         host = filter_list[0]
         initial = [0, 1]
@@ -174,7 +174,7 @@ class AbstractReader:

     def __calculate_aggregates(self, item):
-        # TODO: 2 make total qunatiles more precise
+        # TODO: 2 make total quantiles more precise
         if item.RPS:
             if item.avg_response_time:
                 item.selfload = 100 * item.selfload / item.RPS
@@ -577,52 +577,48 @@ class PhantomReader(AbstractReader):

     def __read_phout_data(self, force):
-        # FIXME: 3 select is useless here
-        phout_ready = select.select([self.phout], [], [], 0)[0]
-        self.log.debug("Selected phout: %s", phout_ready)
-        if phout_ready:
-            phout = phout_ready.pop(0).readlines()
-            self.log.debug("About to process %s phout lines", len(phout))
-            for line in phout:
-                line = self.partial_buffer + line
-                self.partial_buffer = ''
-                if line[-1] != "\n":
-                    self.partial_buffer = line
-                    continue
-                line = line.strip()
-                if not line:
-                    return None
-                #1346949510.514 74420 66 78 65409 8867 74201 18 15662 0 200
-                #self.log.debug("Phout line: %s", line)
-                data = line.split("\t")
-                if len(data) != 12:
-                    self.log.warning("Wrong phout line, skipped: %s", line)
-                    continue
-                cur_time = int(float(data[0]) + float(data[2]) / 1000000)
-                #self.log.info("%s => %s", data[0], cur_time)
-                try:
-                    active = self.stat_data[cur_time]
-                except KeyError:
-                    #self.log.debug("No tasks info for second yet: %s", cur_time)
-                    active = 0
-
-                if not cur_time in self.data_buffer.keys():
-                    self.first_request_time = min(self.first_request_time, int(float(data[0])))
-                    if self.data_queue and self.data_queue[-1] >= cur_time:
-                        self.log.warning("Aggregator data dates must be sequential: %s vs %s" % (cur_time, self.data_queue[-1]))
-                        cur_time = self.data_queue[-1]
-                    else:
-                        self.data_queue.append(cur_time)
-                        self.data_buffer[cur_time] = []
-                # marker, threads, overallRT, httpCode, netCode
-                data_item = [data[1], active, int(data[2]) / 1000, data[11], data[10]]
-                # bytes: sent received
-                data_item += [int(data[8]), int(data[9])]
-                # connect send latency receive
-                data_item += [int(data[3]) / 1000, int(data[4]) / 1000, int(data[5]) / 1000, int(data[6]) / 1000]
-                # accuracy
-                data_item += [(float(data[7]) + 1) / (int(data[2]) + 1)]
-                self.data_buffer[cur_time].append(data_item)
+        phout = self.phout.readlines()
+        self.log.debug("About to process %s phout lines", len(phout))
+        for line in phout:
+            line = self.partial_buffer + line
+            self.partial_buffer = ''
+            if line[-1] != "\n":
+                self.partial_buffer = line
+                continue
+            line = line.strip()
+            if not line:
+                return None
+            #1346949510.514 74420 66 78 65409 8867 74201 18 15662 0 200
+            #self.log.debug("Phout line: %s", line)
+            data = line.split("\t")
+            if len(data) != 12:
+                self.log.warning("Wrong phout line, skipped: %s", line)
+                continue
+            cur_time = int(float(data[0]) + float(data[2]) / 1000000)
+            #self.log.info("%s => %s", data[0], cur_time)
+            try:
+                active = self.stat_data[cur_time]
+            except KeyError:
+                #self.log.debug("No tasks info for second yet: %s", cur_time)
+                active = 0
+
+            if not cur_time in self.data_buffer.keys():
+                self.first_request_time = min(self.first_request_time, int(float(data[0])))
+                if self.data_queue and self.data_queue[-1] >= cur_time:
+                    self.log.warning("Aggregator data dates must be sequential: %s vs %s" % (cur_time, self.data_queue[-1]))
+                    cur_time = self.data_queue[-1]
+                else:
+                    self.data_queue.append(cur_time)
+                    self.data_buffer[cur_time] = []
+            # marker, threads, overallRT, httpCode, netCode
+            data_item = [data[1], active, int(data[2]) / 1000, data[11], data[10]]
+            # bytes: sent received
+            data_item += [int(data[8]), int(data[9])]
+            # connect send latency receive
+            data_item += [int(data[3]) / 1000, int(data[4]) / 1000, int(data[5]) / 1000, int(data[6]) / 1000]
+            # accuracy
+            data_item += [(float(data[7]) + 1) / (int(data[2]) + 1)]
+            self.data_buffer[cur_time].append(data_item)

         if len(self.data_queue) > 2:
             return self.pop_second()
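This is the "unnecessary select" from the commit title: phout is a regular file, and POSIX select() always reports a regular file as readable, even at EOF, so the guard could never skip the read and plain readlines() behaves the same. A small demonstration of that property (Unix only):

import select
import tempfile

with tempfile.TemporaryFile(mode='w+') as phout:
    # An empty regular file still polls as "ready to read"...
    print(select.select([phout], [], [], 0)[0])  # [<file object>]
    # ...and readlines() simply returns nothing, so the guard was a no-op.
    print(phout.readlines())  # []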
@@ -1,5 +1,4 @@
 from Tests.TankTests import TankTestCase
-import tempfile
 import unittest

 class TankCoreTestCase(TankTestCase):
@@ -22,6 +21,8 @@ class TankCoreTestCase(TankTestCase):
     def test_tankCore(self):
         paths = ['config/load.conf']
         self.foo.load_configs(paths)
+        self.assertEquals('passed', self.foo.get_option('dotted', 'test'))
+
         self.foo.load_plugins()
         self.foo.plugins_configure()
         self.foo.plugins_prepare_test()
@@ -1,3 +1,6 @@
+[DEFAULT]
+dotted.test=passed
+
 [tank]
 someoption=somevalue
 plugin_ShellExec=Tank/Plugins/ShellExec.py
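The [DEFAULT] entry above is the fixture for the assertion added in the test: ConfigParser propagates it into [tank], load_configs promotes it to [dotted] test=passed, and get_option('dotted', 'test') then returns 'passed'.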