yapf autoformat

Alexey Lavrenuke 2016-12-30 16:46:29 +03:00
parent 443bd5291d
commit 40706c55b8
75 changed files with 1701 additions and 1560 deletions

.style.yapf Normal file
View File

@@ -0,0 +1,7 @@
[style]
based_on_style = pep8
COALESCE_BRACKETS = True
COLUMN_LIMIT = 80
DEDENT_CLOSING_BRACKETS = False
SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True
SPLIT_BEFORE_FIRST_ARGUMENT = True

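Taken together, these options keep pep8 as the base style, cap lines at 80 columns, and make yapf split before the first argument of an over-long call instead of aligning continuation lines under the opening bracket; that is the transformation visible in almost every hunk below. A minimal sketch of the effect through yapf's Python API (an illustration only, assuming yapf is installed, the [style] section above is saved as .style.yapf in the working directory, and a yapf version whose FormatCode returns a (text, changed) pair; the sample call is taken from one of the hunks below):

from yapf.yapflib.yapf_api import FormatCode

# A hand-wrapped call in the old style: continuation aligned under the paren.
source = (
    "logging.basicConfig(level=logging.DEBUG,\n"
    "                    format='%(asctime)s %(levelname)s %(message)s')\n")

# style_config accepts a path to a style file such as the one above.
formatted, changed = FormatCode(source, style_config='.style.yapf')
print(formatted)
# With SPLIT_BEFORE_FIRST_ARGUMENT = True this prints the form seen below:
# logging.basicConfig(
#     level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
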
View File

@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
setup(
name='yandextank',
version='1.8.29-9',
version='1.8.29-10',
description='a performance measurement tool',
long_description='''
Yandex.Tank is a performance measurement and load testing automation tool.

View File

@@ -27,8 +27,9 @@ class ApiWorker:
file_handler = logging.FileHandler(self.log_filename)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s %(message)s"))
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s %(message)s"))
logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
stderr_hdl = logging.StreamHandler(sys.stderr)
@@ -65,8 +66,8 @@ class ApiWorker:
""" Make preparations before running Tank """
self.options = options
if self.options.get('lock_dir', None):
self.core.set_option(self.core.SECTION, "lock_dir",
self.options['lock_dir'])
self.core.set_option(
self.core.SECTION, "lock_dir", self.options['lock_dir'])
while True:
try:
@@ -109,13 +110,14 @@ class ApiWorker:
except KeyboardInterrupt as ex:
self.log.info(
"Do not press Ctrl+C again, the test will be broken otherwise")
self.log.debug("Caught KeyboardInterrupt: %s",
traceback.format_exc(ex))
self.log.debug(
"Caught KeyboardInterrupt: %s", traceback.format_exc(ex))
try:
retcode = self.__graceful_shutdown()
except KeyboardInterrupt as ex:
self.log.debug("Caught KeyboardInterrupt again: %s",
traceback.format_exc(ex))
self.log.debug(
"Caught KeyboardInterrupt again: %s",
traceback.format_exc(ex))
self.log.info(
"User insists on exiting, aborting graceful shutdown...")
retcode = 1
@@ -138,12 +140,13 @@ class ApiWorker:
for filename in conf_files:
if fnmatch.fnmatch(filename, '*.ini'):
configs += [
os.path.realpath(self.baseconfigs_location + os.sep +
filename)
os.path.realpath(
self.baseconfigs_location + os.sep + filename)
]
except OSError:
self.log.warn(self.baseconfigs_location +
' is not accessible to get configs list')
self.log.warn(
self.baseconfigs_location +
' is not accessible to get configs list')
configs += [os.path.expanduser('~/.yandex-tank')]
return configs

View File

@@ -76,8 +76,8 @@ class AbstractPlugin(object):
def publish(self, key, value):
"""publish value to status"""
self.log.debug("Publishing status: %s/%s: %s", self.__class__.__name__,
key, value)
self.log.debug(
"Publishing status: %s/%s: %s", self.__class__.__name__, key, value)
self.core.publish(self.__class__.__name__, key, value)
def close(self):
@@ -175,25 +175,20 @@ class AbstractCriterion(object):
class GeneratorPlugin(object):
DEFAULT_INFO = {'address': '',
'port': 80,
'instances': 1,
'ammo_file': '',
'rps_schedule': [],
'duration': 0,
'loop_count': 0}
DEFAULT_INFO = {
'address': '',
'port': 80,
'instances': 1,
'ammo_file': '',
'rps_schedule': [],
'duration': 0,
'loop_count': 0
}
class Info(object):
def __init__(
self,
address,
port,
instances,
ammo_file,
rps_schedule,
duration,
loop_count):
self, address, port, instances, ammo_file, rps_schedule,
duration, loop_count):
self.address = address
self.port = port
self.instances = instances

View File

@@ -62,8 +62,7 @@ class ResourceManager(object):
'Reading large resource to memory: %s. Size: %s bytes',
filename, size)
except Exception as exc:
logger.debug('Unable to check resource size %s. %s', filename,
exc)
logger.debug('Unable to check resource size %s. %s', filename, exc)
with opener(filename, 'r') as resource:
content = resource.read()
return content
@@ -145,20 +144,17 @@ class HttpOpener(object):
def open(self, *args, **kwargs):
with closing(
requests.get(
self.url,
stream=True,
verify=False,
timeout=self.timeout
)
) as stream:
requests.get(
self.url, stream=True, verify=False,
timeout=self.timeout)) as stream:
stream_iterator = stream.raw.stream(100, decode_content=True)
header = stream_iterator.next()
fmt = self.fmt_detector.detect_format(header)
logger.debug('Resource %s format detected: %s.', self.url, fmt)
if not self.force_download and fmt != 'gzip' and self.data_length > 10**8:
logger.info(
"Resource data is not gzipped and larger than 100MB. Reading from stream..")
"Resource data is not gzipped and larger than 100MB. Reading from stream.."
)
return HttpStreamWrapper(self.url)
else:
downloaded_f_path = self.download_file()
@@ -176,44 +172,47 @@ class HttpOpener(object):
"Resource %s has already been downloaded to %s . Using it..",
self.url, tmpfile_path)
else:
logger.info("Downloading resource %s to %s", self.url,
tmpfile_path)
logger.info("Downloading resource %s to %s", self.url, tmpfile_path)
try:
data = requests.get(self.url, verify=False, timeout=10)
except requests.exceptions.Timeout as exc:
raise RuntimeError('Connection timeout reached '
'trying to download resource: %s \n'
'via HttpOpener: %s' % (self.url, exc))
raise RuntimeError(
'Connection timeout reached '
'trying to download resource: %s \n'
'via HttpOpener: %s' % (self.url, exc))
f = open(tmpfile_path, "wb")
f.write(data.content)
f.close()
logger.info("Successfully downloaded resource %s to %s",
self.url, tmpfile_path)
logger.info(
"Successfully downloaded resource %s to %s", self.url,
tmpfile_path)
return tmpfile_path
def get_request_info(self):
logger.info('Trying to get info about resource %s', self.url)
req = requests.Request('HEAD',
self.url,
headers={'Accept-Encoding': 'identity'})
req = requests.Request(
'HEAD', self.url, headers={'Accept-Encoding': 'identity'})
session = requests.Session()
prepared = session.prepare_request(req)
try:
self.data_info = session.send(prepared,
verify=False,
allow_redirects=True,
timeout=self.timeout)
except (requests.exceptions.Timeout,
self.data_info = session.send(
prepared,
verify=False,
allow_redirects=True,
timeout=self.timeout)
except (
requests.exceptions.Timeout,
requests.exceptions.ConnectionError) as exc:
logger.warning(
'Connection error trying to get info about resource %s \n'
'Exception: %s \n'
'Retrying...' % (self.url, exc))
try:
self.data_info = session.send(prepared,
verify=False,
allow_redirects=True,
timeout=self.timeout)
self.data_info = session.send(
prepared,
verify=False,
allow_redirects=True,
timeout=self.timeout)
except Exception as exc:
logger.debug(
'Connection error trying to get info about resource %s \n'
@@ -228,12 +227,14 @@ class HttpOpener(object):
except requests.exceptions.HTTPError as exc:
if exc.response.status_code == 405:
logger.info(
"Resource storage does not support HEAD method. Ignore proto error and force download file.")
"Resource storage does not support HEAD method. Ignore proto error and force download file."
)
self.force_download = True
else:
raise RuntimeError('Invalid HTTP response '
'trying to get info about resource: %s \n'
'via HttpOpener: %s' % (self.url, exc))
raise RuntimeError(
'Invalid HTTP response '
'trying to get info about resource: %s \n'
'via HttpOpener: %s' % (self.url, exc))
@property
def get_filename(self):
@@ -262,14 +263,13 @@ class HttpStreamWrapper:
self.pointer = 0
self.stream_iterator = None
self._content_consumed = False
self.chunk_size = 10 ** 3
self.chunk_size = 10**3
try:
self.stream = requests.get(self.url,
stream=True,
verify=False,
timeout=10)
self.stream = requests.get(
self.url, stream=True, verify=False, timeout=10)
self.stream_iterator = self.stream.iter_content(self.chunk_size)
except (requests.exceptions.Timeout,
except (
requests.exceptions.Timeout,
requests.exceptions.ConnectionError) as exc:
raise RuntimeError(
'Connection errors or timeout reached '
@@ -278,9 +278,10 @@ class HttpStreamWrapper:
try:
self.stream.raise_for_status()
except requests.exceptions.HTTPError as exc:
raise RuntimeError('Invalid HTTP response'
'trying to open stream for resource: %s\n'
'via HttpStreamWrapper: %s' % (self.url, exc))
raise RuntimeError(
'Invalid HTTP response'
'trying to open stream for resource: %s\n'
'via HttpStreamWrapper: %s' % (self.url, exc))
def __enter__(self):
return self
@@ -295,12 +296,11 @@ class HttpStreamWrapper:
def _reopen_stream(self):
self.stream.connection.close()
try:
self.stream = requests.get(self.url,
stream=True,
verify=False,
timeout=30)
self.stream = requests.get(
self.url, stream=True, verify=False, timeout=30)
self.stream_iterator = self.stream.iter_content(self.chunk_size)
except (requests.exceptions.Timeout,
except (
requests.exceptions.Timeout,
requests.exceptions.ConnectionError) as exc:
raise RuntimeError(
'Connection errors or timeout reached '
@@ -309,9 +309,10 @@ class HttpStreamWrapper:
try:
self.stream.raise_for_status()
except requests.exceptions.HTTPError as exc:
raise RuntimeError('Invalid HTTP response'
'trying to reopen stream for resource: %s\n'
'via HttpStreamWrapper: %s' % (self.url, exc))
raise RuntimeError(
'Invalid HTTP response'
'trying to reopen stream for resource: %s\n'
'via HttpStreamWrapper: %s' % (self.url, exc))
self._content_consumed = False
def _enhance_buffer(self):
@@ -334,7 +335,8 @@ class HttpStreamWrapper:
while '\n' not in self.buffer:
try:
self._enhance_buffer()
except (StopIteration, TypeError,
except (
StopIteration, TypeError,
requests.exceptions.StreamConsumedError):
self._content_consumed = True
break
@@ -352,7 +354,8 @@ class HttpStreamWrapper:
while len(self.buffer) < chunk_size:
try:
self._enhance_buffer()
except (StopIteration, TypeError,
except (
StopIteration, TypeError,
requests.exceptions.StreamConsumedError):
break
if len(self.buffer) > chunk_size:

View File

@@ -3,7 +3,6 @@ from yandextank.common.util import Drain, Chopper
class TestDrain(object):
def test_run(self):
"""
Test drain's run function (in the same thread)
@@ -38,7 +37,6 @@ class TestDrain(object):
class TestChopper(object):
def test_output(self):
source = (range(i) for i in range(5))
expected = [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]

View File

@@ -43,7 +43,6 @@ class Drain(th.Thread):
class SecuredShell(object):
def __init__(self, host, port, username, timeout):
self.host = host
self.port = port
@@ -52,20 +51,22 @@ class SecuredShell(object):
def connect(self):
logger.debug(
"Opening SSH connection to {host}:{port}".format(host=self.host,
port=self.port))
"Opening SSH connection to {host}:{port}".format(
host=self.host, port=self.port))
client = SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(AutoAddPolicy())
try:
client.connect(self.host,
port=self.port,
username=self.username,
timeout=self.timeout, )
client.connect(
self.host,
port=self.port,
username=self.username,
timeout=self.timeout, )
except ValueError as e:
logger.error(e)
logger.warning("""
logger.warning(
"""
Patching Crypto.Cipher.AES.new and making another attempt.
See here for the details:
@@ -82,10 +83,11 @@ http://uucode.com/blog/2015/02/20/workaround-for-ctr-mode-needs-counter-paramete
return orig_new(key, *ls)
Crypto.Cipher.AES.new = fixed_AES_new
client.connect(self.host,
port=self.port,
username=self.username,
timeout=self.timeout, )
client.connect(
self.host,
port=self.port,
username=self.username,
timeout=self.timeout, )
return client
def execute(self, cmd):
@@ -107,19 +109,17 @@ http://uucode.com/blog/2015/02/20/workaround-for-ctr-mode-needs-counter-paramete
return self.execute("mkdir -p %s" % path)
def send_file(self, local_path, remote_path):
logger.info("Sending [{local}] to {host}:[{remote}]".format(
local=local_path,
host=self.host,
remote=remote_path))
logger.info(
"Sending [{local}] to {host}:[{remote}]".format(
local=local_path, host=self.host, remote=remote_path))
with self.connect() as client, client.open_sftp() as sftp:
result = sftp.put(local_path, remote_path)
return result
def get_file(self, remote_path, local_path):
logger.info("Receiving from {host}:[{remote}] to [{local}]".format(
local=local_path,
host=self.host,
remote=remote_path))
logger.info(
"Receiving from {host}:[{remote}] to [{local}]".format(
local=local_path, host=self.host, remote=remote_path))
with self.connect() as client, client.open_sftp() as sftp:
result = sftp.get(remote_path, local_path)
return result
@@ -129,39 +129,27 @@ http://uucode.com/blog/2015/02/20/workaround-for-ctr-mode-needs-counter-paramete
def check_ssh_connection():
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
logging.basicConfig(
level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger("paramiko.transport").setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(
description='Test SSH connection for monitoring.')
parser.add_argument(
'-e', '--endpoint',
default='example.org',
help='which host to try')
'-e', '--endpoint', default='example.org', help='which host to try')
parser.add_argument(
'-u', '--username',
default=os.getlogin(),
help='SSH username')
'-u', '--username', default=os.getlogin(), help='SSH username')
parser.add_argument(
'-p', '--port',
default=22,
type=int,
help='SSH port')
parser.add_argument('-p', '--port', default=22, type=int, help='SSH port')
args = parser.parse_args()
logging.info(
"Checking SSH to %s@%s:%d",
args.username,
args.endpoint,
args.port)
"Checking SSH to %s@%s:%d", args.username, args.endpoint, args.port)
ssh = SecuredShell(args.endpoint, args.port, args.username, 10)
print(ssh.execute("ls -l"))
class AsyncSession(object):
def __init__(self, ssh, cmd):
self.client = ssh.connect()
self.session = self.client.get_transport().open_session()
@@ -427,8 +415,8 @@ def expand_time(str_time, default_unit='s', multiplier=1):
result += value * 60 * 60 * 24 * 7
continue
else:
raise ValueError("String contains unsupported unit %s: %s" %
(unit, str_time))
raise ValueError(
"String contains unsupported unit %s: %s" % (unit, str_time))
return int(result * multiplier)
@@ -460,11 +448,12 @@ def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
cmd = shlex.split(cmd)
if catch_out:
process = subprocess.Popen(cmd,
shell=shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
process = subprocess.Popen(
cmd,
shell=shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
else:
process = subprocess.Popen(cmd, shell=shell, close_fds=True)
@@ -503,14 +492,12 @@ def pairs(lst):
def update_status(status, multi_key, value):
if len(multi_key) > 1:
update_status(
status.setdefault(multi_key[0], {}), multi_key[1:], value)
update_status(status.setdefault(multi_key[0], {}), multi_key[1:], value)
else:
status[multi_key[0]] = value
class AddressWizard:
def __init__(self):
self.lookup_fn = socket.getaddrinfo
self.socket_class = socket.socket
@@ -554,8 +541,9 @@ class AddressWizard:
resolved = self.lookup_fn(address_str, port)
logger.debug("Lookup result: %s", resolved)
except Exception as exc:
logger.debug("Exception trying to resolve hostname %s : %s",
address_str, traceback.format_exc(exc))
logger.debug(
"Exception trying to resolve hostname %s : %s", address_str,
traceback.format_exc(exc))
msg = "Failed to resolve hostname: %s. Error: %s"
raise RuntimeError(msg % (address_str, exc))
@@ -565,7 +553,8 @@ class AddressWizard:
if explicit_port:
logger.warn(
"Using phantom.port option is deprecated. Use phantom.address=[address]:port instead")
"Using phantom.port option is deprecated. Use phantom.address=[address]:port instead"
)
port = int(explicit_port)
elif not port:
port = 80
@@ -574,8 +563,9 @@ class AddressWizard:
try:
self.__test(family, (parsed_ip, port))
except RuntimeError as exc:
logger.warn("Failed TCP connection test using [%s]:%s",
parsed_ip, port)
logger.warn(
"Failed TCP connection test using [%s]:%s", parsed_ip,
port)
continue
return is_v6, parsed_ip, int(port), address_str
@@ -589,8 +579,9 @@ class AddressWizard:
test_sock.settimeout(5)
test_sock.connect(sa)
except Exception as exc:
logger.debug("Exception on connect attempt [%s]:%s : %s", sa[0],
sa[1], traceback.format_exc(exc))
logger.debug(
"Exception on connect attempt [%s]:%s : %s", sa[0], sa[1],
traceback.format_exc(exc))
msg = "TCP Connection test failed for [%s]:%s, use phantom.connection_test=0 to disable it"
raise RuntimeError(msg % (sa[0], sa[1]))
finally:
@@ -598,7 +589,6 @@ class AddressWizard:
class Chopper(object):
def __init__(self, source):
self.source = source

View File

@@ -12,34 +12,40 @@ def main():
'-c',
'--config',
action='append',
help="Path to INI file containing run options, multiple options accepted")
parser.add_option('-f',
'--fail-lock',
action='store_true',
dest='lock_fail',
help="Don't wait for lock to release, fail test instead")
help="Path to INI file containing run options, multiple options accepted"
)
parser.add_option(
'-f',
'--fail-lock',
action='store_true',
dest='lock_fail',
help="Don't wait for lock to release, fail test instead")
parser.add_option(
'-i',
'--ignore-lock',
action='store_true',
dest='ignore_lock',
help="Ignore lock files from concurrent instances, has precedence before --lock-fail")
parser.add_option('-k',
'--lock-dir',
action='store',
dest='lock_dir',
type="string",
help="Directory for lock file")
parser.add_option('-l',
'--log',
action='store',
default="tank.log",
help="Tank log file location")
parser.add_option('-m',
'--manual-start',
action='store_true',
dest='manual_start',
help="Wait for Enter key to start the test")
help="Ignore lock files from concurrent instances, has precedence before --lock-fail"
)
parser.add_option(
'-k',
'--lock-dir',
action='store',
dest='lock_dir',
type="string",
help="Directory for lock file")
parser.add_option(
'-l',
'--log',
action='store',
default="tank.log",
help="Tank log file location")
parser.add_option(
'-m',
'--manual-start',
action='store_true',
dest='manual_start',
help="Wait for Enter key to start the test")
parser.add_option(
'-n',
'--no-rc',
@@ -50,21 +56,25 @@ def main():
'-o',
'--option',
action='append',
help="Set config option, multiple options accepted, example: -o 'shellexec.start=pwd'")
parser.add_option('-q',
'--quiet',
action='store_true',
help="Less console output, only errors and warnings")
help="Set config option, multiple options accepted, example: -o 'shellexec.start=pwd'"
)
parser.add_option(
'-q',
'--quiet',
action='store_true',
help="Less console output, only errors and warnings")
parser.add_option(
'-s',
'--scheduled-start',
action='store',
dest='scheduled_start',
help="Start test at specified time, format 'YYYY-MM-DD hh:mm:ss', date part is optional")
parser.add_option('-v',
'--verbose',
action='store_true',
help="More console output, +debug messages")
help="Start test at specified time, format 'YYYY-MM-DD hh:mm:ss', date part is optional"
)
parser.add_option(
'-v',
'--verbose',
action='store_true',
help="More console output, +debug messages")
completion_helper = CompletionHelperOptionParser()
completion_helper.handle_request(parser)

View File

@@ -37,9 +37,11 @@ class RealConsoleMarkup(object):
def clean_markup(self, orig_str):
''' clean markup from string '''
for val in [self.YELLOW, self.RED, self.RESET, self.CYAN,
self.BG_MAGENTA, self.WHITE, self.BG_GREEN, self.GREEN,
self.BG_BROWN, self.RED_DARK, self.MAGENTA, self.BG_CYAN]:
for val in [
self.YELLOW, self.RED, self.RESET, self.CYAN, self.BG_MAGENTA,
self.WHITE, self.BG_GREEN, self.GREEN, self.BG_BROWN,
self.RED_DARK, self.MAGENTA, self.BG_CYAN
]:
orig_str = orig_str.replace(val, '')
return orig_str
@@ -101,8 +103,10 @@ class ConsoleTank:
if self.log_filename:
file_handler = logging.FileHandler(self.log_filename)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"))
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
))
logger.addHandler(file_handler)
# create console handler with a higher log level
@@ -110,7 +114,8 @@ class ConsoleTank:
stderr_hdl = logging.StreamHandler(sys.stderr)
fmt_verbose = logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s")
"%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
)
fmt_regular = logging.Formatter(
"%(asctime)s [%(levelname)s] %(message)s", "%H:%M:%S")
@@ -155,12 +160,13 @@ class ConsoleTank:
for filename in conf_files:
if fnmatch.fnmatch(filename, '*.ini'):
configs += [
os.path.realpath(self.baseconfigs_location + os.sep +
filename)
os.path.realpath(
self.baseconfigs_location + os.sep + filename)
]
except OSError:
self.log.warn(self.baseconfigs_location +
' is not accessible to get configs list')
self.log.warn(
self.baseconfigs_location +
' is not accessible to get configs list')
configs += [os.path.expanduser('~/.yandex-tank')]
return configs
@@ -172,8 +178,8 @@ class ConsoleTank:
"Lock files ignored. This is highly unrecommended practice!")
if self.options.lock_dir:
self.core.set_option(self.core.SECTION, "lock_dir",
self.options.lock_dir)
self.core.set_option(
self.core.SECTION, "lock_dir", self.options.lock_dir)
while True:
try:
@@ -202,7 +208,8 @@ class ConsoleTank:
elif os.path.exists(os.path.realpath('load.conf')):
# just for old 'lunapark' compatibility
self.log.warn(
"Using 'load.conf' is unrecommended, please use 'load.ini' instead")
"Using 'load.conf' is unrecommended, please use 'load.ini' instead"
)
conf_file = os.path.realpath('load.conf')
configs += [conf_file]
self.core.add_artifact_file(conf_file, True)
@@ -260,11 +267,12 @@ class ConsoleTank:
self.core.plugins_configure()
self.core.plugins_prepare_test()
if self.scheduled_start:
self.log.info("Waiting scheduled time: %s...",
self.scheduled_start)
self.log.info(
"Waiting scheduled time: %s...", self.scheduled_start)
while datetime.datetime.now() < self.scheduled_start:
self.log.debug("Not yet: %s < %s", datetime.datetime.now(),
self.scheduled_start)
self.log.debug(
"Not yet: %s < %s",
datetime.datetime.now(), self.scheduled_start)
time.sleep(1)
self.log.info("Time has come: %s", datetime.datetime.now())
@@ -283,13 +291,14 @@ class ConsoleTank:
sys.stdout.write(RealConsoleMarkup.RESET)
sys.stdout.write(RealConsoleMarkup.TOTAL_RESET)
self.signal_count += 1
self.log.debug("Caught KeyboardInterrupt: %s",
traceback.format_exc(ex))
self.log.debug(
"Caught KeyboardInterrupt: %s", traceback.format_exc(ex))
try:
retcode = self.__graceful_shutdown()
except KeyboardInterrupt as ex:
self.log.debug("Caught KeyboardInterrupt again: %s",
traceback.format_exc(ex))
self.log.debug(
"Caught KeyboardInterrupt again: %s",
traceback.format_exc(ex))
self.log.info(
"User insists on exiting, aborting graceful shutdown...")
retcode = 1
@@ -310,7 +319,6 @@ class ConsoleTank:
class DevNullOpts:
def __init__(self):
pass
@@ -318,21 +326,23 @@ class DevNullOpts:
class CompletionHelperOptionParser(OptionParser):
def __init__(self):
OptionParser.__init__(self, add_help_option=False)
self.add_option('--bash-switches-list',
action='store_true',
dest="list_switches",
help="Options list")
self.add_option('--bash-options-prev',
action='store',
dest="list_options_prev",
help="Options list")
self.add_option('--bash-options-cur',
action='store',
dest="list_options_cur",
help="Options list")
self.add_option(
'--bash-switches-list',
action='store_true',
dest="list_switches",
help="Options list")
self.add_option(
'--bash-options-prev',
action='store',
dest="list_options_prev",
help="Options list")
self.add_option(
'--bash-options-cur',
action='store',
dest="list_options_cur",
help="Options list")
def error(self, msg):
pass
@@ -362,8 +372,9 @@ class CompletionHelperOptionParser(OptionParser):
plugin_keys = cmdtank.core.config.get_options(
cmdtank.core.SECTION, cmdtank.core.PLUGIN_PREFIX)
for (plugin_name, plugin_path) in plugin_keys:
opts.append(cmdtank.core.SECTION + '.' +
cmdtank.core.PLUGIN_PREFIX + plugin_name + '=')
opts.append(
cmdtank.core.SECTION + '.' + cmdtank.core.PLUGIN_PREFIX +
plugin_name + '=')
for plugin in cmdtank.core.plugins:
for option in plugin.get_available_options():

View File

@@ -52,11 +52,10 @@ class Var(object):
class Int(Var):
def __init__(self, value=0):
if not isinstance(value, int):
raise ValueError("Value should be an integer, but it is '%s'" %
type(value))
raise ValueError(
"Value should be an integer, but it is '%s'" % type(value))
super(Int, self).__init__(value)
def inc(self, delta=1):
@@ -76,8 +75,9 @@ class Metric(object):
if timestamp is None:
timestamp = int(time.time())
elif not isinstance(timestamp, int):
raise ValueError("Timestamp should be an integer, but it is '%s'" %
type(timestamp))
raise ValueError(
"Timestamp should be an integer, but it is '%s'" %
type(timestamp))
self.metric.put((timestamp, value))
def next(self):

View File

@@ -30,12 +30,10 @@ if sys.version_info[0] < 3:
else:
import configparser as ConfigParser
logger = logging.getLogger(__name__)
class Job(object):
def __init__(
self,
name,
@@ -114,8 +112,10 @@ class TankCore(object):
return self.uuid
def get_available_options(self):
return ["artifacts_base_dir", "artifacts_dir", "flush_config_to",
"taskset_path", "affinity"]
return [
"artifacts_base_dir", "artifacts_dir", "flush_config_to",
"taskset_path", "affinity"
]
def load_configs(self, configs):
""" Tells core to load configs set into options storage """
@@ -129,8 +129,8 @@ class TankCore(object):
self.config.flush()
self.add_artifact_file(self.config.file)
self.set_option(self.SECTION, self.PID_OPTION, str(os.getpid()))
self.flush_config_to = self.get_option(self.SECTION, "flush_config_to",
"")
self.flush_config_to = self.get_option(
self.SECTION, "flush_config_to", "")
if self.flush_config_to:
self.config.flush(self.flush_config_to)
@@ -148,29 +148,26 @@ class TankCore(object):
self.artifacts_dir_name = self.get_option(
self.SECTION, "artifacts_dir", "")
self.taskset_path = self.get_option(self.SECTION, 'taskset_path',
'taskset')
self.taskset_path = self.get_option(
self.SECTION, 'taskset_path', 'taskset')
self.taskset_affinity = self.get_option(self.SECTION, 'affinity', '')
options = self.config.get_options(self.SECTION, self.PLUGIN_PREFIX)
for (plugin_name, plugin_path) in options:
if not plugin_path:
logger.debug("Seems the plugin '%s' was disabled",
plugin_name)
logger.debug("Seems the plugin '%s' was disabled", plugin_name)
continue
logger.debug("Loading plugin %s from %s", plugin_name,
plugin_path)
logger.debug("Loading plugin %s from %s", plugin_name, plugin_path)
# FIXME cleanup an old deprecated plugin path format
if '/' in plugin_path:
logger.warning("Deprecated plugin path format: %s\n"
"Should be in pythonic format. Example:\n"
" plugin_jmeter=yandextank.plugins.JMeter",
plugin_path)
logger.warning(
"Deprecated plugin path format: %s\n"
"Should be in pythonic format. Example:\n"
" plugin_jmeter=yandextank.plugins.JMeter", plugin_path)
if plugin_path.startswith("Tank/Plugins/"):
plugin_path = "yandextank.plugins." + \
plugin_path.split('/')[-1].split('.')[0]
logger.warning("Converted plugin path to %s",
plugin_path)
logger.warning("Converted plugin path to %s", plugin_path)
else:
raise ValueError(
"Couldn't convert plugin path to new format:\n %s" %
@@ -183,7 +180,8 @@ class TankCore(object):
"Deprecated plugin path format: %s\n"
"Tank plugins are now orginized using"
" namespace packages. Example:\n"
" plugin_jmeter=yandextank.plugins.JMeter", plugin_path)
" plugin_jmeter=yandextank.plugins.JMeter",
plugin_path)
plugin_path = plugin_path.replace(
"yatank_internal_", "yandextank.plugins.")
if plugin_path.startswith("yatank_"):
@@ -191,7 +189,8 @@ class TankCore(object):
"Deprecated plugin path format: %s\n"
"Tank plugins are now orginized using"
" namespace packages. Example:\n"
" plugin_jmeter=yandextank.plugins.JMeter", plugin_path)
" plugin_jmeter=yandextank.plugins.JMeter",
plugin_path)
plugin_path = plugin_path.replace(
"yatank_", "yandextank.plugins.")
@@ -201,9 +200,10 @@ class TankCore(object):
instance = getattr(plugin, 'Plugin')(self)
except:
logger.warning(
"Deprecated plugin classname: %s. Should be 'Plugin'", plugin)
instance = getattr(plugin, plugin_path.split(
'.')[-1] + 'Plugin')(self)
"Deprecated plugin classname: %s. Should be 'Plugin'",
plugin)
instance = getattr(
plugin, plugin_path.split('.')[-1] + 'Plugin')(self)
self.plugins.append(instance)
@@ -247,26 +247,16 @@ class TankCore(object):
gen = None
self.job = Job(
name=self.get_option(
self.SECTION_META,
"job_name",
'none').decode('utf8'),
description=self.get_option(
self.SECTION_META,
"job_dsc",
'').decode('utf8'),
task=self.get_option(
self.SECTION_META,
'task',
'dir').decode('utf8'),
version=self.get_option(
self.SECTION_META,
'ver',
'').decode('utf8'),
name=self.get_option(self.SECTION_META, "job_name",
'none').decode('utf8'),
description=self.get_option(self.SECTION_META, "job_dsc",
'').decode('utf8'),
task=self.get_option(self.SECTION_META, 'task',
'dir').decode('utf8'),
version=self.get_option(self.SECTION_META, 'ver',
'').decode('utf8'),
config_copy=self.get_option(
self.SECTION_META,
'copy_config_to',
'config_copy'),
self.SECTION_META, 'copy_config_to', 'config_copy'),
monitoring_plugin=mon,
aggregator_plugin=aggregator,
generator_plugin=gen,
@@ -319,9 +309,7 @@ class TankCore(object):
end_time = time.time()
diff = end_time - begin_time
logger.debug("Polling took %s", diff)
logger.debug("Tank status:\n%s",
json.dumps(self.status,
indent=2))
logger.debug("Tank status:\n%s", json.dumps(self.status, indent=2))
# screen refresh every 0.5 s
if diff < 0.5:
time.sleep(0.5 - diff)
@@ -340,8 +328,8 @@ class TankCore(object):
logger.debug("RC after: %s", retcode)
except Exception as ex:
logger.error("Failed finishing plugin %s: %s", plugin, ex)
logger.debug("Failed finishing plugin: %s",
traceback.format_exc(ex))
logger.debug(
"Failed finishing plugin: %s", traceback.format_exc(ex))
if not retcode:
retcode = 1
@@ -363,10 +351,10 @@ class TankCore(object):
retcode = plugin.post_process(retcode)
logger.debug("RC after: %s", retcode)
except Exception as ex:
logger.error("Failed post-processing plugin %s: %s", plugin,
ex)
logger.debug("Failed post-processing plugin: %s",
traceback.format_exc(ex))
logger.error("Failed post-processing plugin %s: %s", plugin, ex)
logger.debug(
"Failed post-processing plugin: %s",
traceback.format_exc(ex))
if not retcode:
retcode = 1
@@ -380,16 +368,15 @@ class TankCore(object):
def taskset(self, pid, path, affinity):
if affinity:
args = "%s -pc %s %s" % (path, affinity, pid)
retcode, stdout, stderr = execute(args,
shell=True,
poll_period=0.1,
catch_out=True)
retcode, stdout, stderr = execute(
args, shell=True, poll_period=0.1, catch_out=True)
logger.debug('taskset stdout: %s', stdout)
if retcode != 0:
raise KeyError(stderr)
else:
logger.info("Enabled taskset for pid %s with affinity %s",
str(pid), affinity)
logger.info(
"Enabled taskset for pid %s with affinity %s",
str(pid), affinity)
def __collect_artifacts(self):
logger.debug("Collecting artifacts")
@@ -427,8 +414,8 @@ class TankCore(object):
logger.debug("Expanding shell option %s", value)
retcode, stdout, stderr = execute(value[1:-1], True, 0.1, True)
if retcode or stderr:
raise ValueError("Error expanding option %s, RC: %s" %
(value, retcode))
raise ValueError(
"Error expanding option %s, RC: %s" % (value, retcode))
value = stdout.strip()
return value
@@ -447,9 +434,10 @@ class TankCore(object):
Retrieve a plugin of desired class, KeyError raised otherwise
"""
logger.debug("Searching for plugin: %s", plugin_class)
matches = [plugin
for plugin in self.plugins
if isinstance(plugin, plugin_class)]
matches = [
plugin for plugin in self.plugins
if isinstance(plugin, plugin_class)
]
if len(matches) > 0:
if len(matches) > 1:
logger.debug(
@@ -457,8 +445,7 @@ class TankCore(object):
plugin_class)
return matches[-1]
else:
raise KeyError("Requested plugin type not found: %s" %
plugin_class)
raise KeyError("Requested plugin type not found: %s" % plugin_class)
def __collect_file(self, filename, keep_original=False):
"""
@@ -487,28 +474,30 @@ class TankCore(object):
Add file to be stored as result artifact on post-process phase
"""
if filename:
logger.debug("Adding artifact file to collect (keep=%s): %s",
keep_original, filename)
logger.debug(
"Adding artifact file to collect (keep=%s): %s", keep_original,
filename)
self.artifact_files[filename] = keep_original
def apply_shorthand_options(self, options, default_section='DEFAULT'):
for option_str in options:
try:
section = option_str[:option_str.index('.')]
option = option_str[
option_str.index('.') + 1:option_str.index('=')]
option = option_str[option_str.index('.') + 1:option_str.index(
'=')]
except ValueError:
section = default_section
option = option_str[:option_str.index('=')]
value = option_str[option_str.index('=') + 1:]
logger.debug("Override option: %s => [%s] %s=%s", option_str,
section, option, value)
logger.debug(
"Override option: %s => [%s] %s=%s", option_str, section,
option, value)
self.set_option(section, option, value)
def get_lock_dir(self):
if not self.lock_dir:
self.lock_dir = self.get_option(self.SECTION, "lock_dir",
self.LOCK_DIR)
self.lock_dir = self.get_option(
self.SECTION, "lock_dir", self.LOCK_DIR)
return os.path.expanduser(self.lock_dir)
@@ -516,8 +505,8 @@ class TankCore(object):
if not force and self.__there_is_locks():
raise RuntimeError("There is lock files")
fh, self.lock_file = tempfile.mkstemp('.lock', 'lunapark_',
self.get_lock_dir())
fh, self.lock_file = tempfile.mkstemp(
'.lock', 'lunapark_', self.get_lock_dir())
os.close(fh)
os.chmod(self.lock_file, 0o644)
self.config.file = self.lock_file
@@ -542,18 +531,19 @@ class TankCore(object):
info.read(full_name)
pid = info.get(TankCore.SECTION, self.PID_OPTION)
if not pid_exists(int(pid)):
logger.debug("Lock PID %s not exists, ignoring and "
"trying to remove", pid)
logger.debug(
"Lock PID %s not exists, ignoring and "
"trying to remove", pid)
try:
os.remove(full_name)
except Exception as exc:
logger.debug("Failed to delete lock %s: %s",
full_name, exc)
logger.debug(
"Failed to delete lock %s: %s", full_name, exc)
else:
retcode = True
except Exception as exc:
logger.warn("Failed to load info from lock %s: %s",
full_name, exc)
logger.warn(
"Failed to load info from lock %s: %s", full_name, exc)
retcode = True
return retcode
@@ -584,14 +574,15 @@ class TankCore(object):
plugin.close()
except Exception as ex:
logger.error("Failed closing plugin %s: %s", plugin, ex)
logger.debug("Failed closing plugin: %s",
traceback.format_exc(ex))
logger.debug(
"Failed closing plugin: %s", traceback.format_exc(ex))
@property
def artifacts_dir(self):
if not self._artifacts_dir:
if not self.artifacts_dir_name:
date_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S.")
date_str = datetime.datetime.now().strftime(
"%Y-%m-%d_%H-%M-%S.")
self.artifacts_dir_name = tempfile.mkdtemp(
"", date_str, self.artifacts_base_dir)
elif not os.path.isdir(self.artifacts_dir_name):
@@ -621,8 +612,9 @@ class ConfigManager(object):
def load_files(self, configs):
""" Read configs set into storage """
logger.debug("Reading configs: %s", configs)
config_filenames = [resource.resource_filename(config)
for config in configs]
config_filenames = [
resource.resource_filename(config) for config in configs
]
try:
self.config.read(config_filenames)
except Exception as ex:
@@ -644,13 +636,13 @@ class ConfigManager(object):
try:
for option in self.config.options(section):
if not prefix or option.find(prefix) == 0:
res += [(option[len(prefix):],
self.config.get(section, option))]
res += [(
option[len(prefix):], self.config.get(section, option))]
except ConfigParser.NoSectionError as ex:
logger.warning("No section: %s", ex)
logger.debug("Section: [%s] prefix: '%s' options:\n%s", section,
prefix, res)
logger.debug(
"Section: [%s] prefix: '%s' options:\n%s", section, prefix, res)
return res
def find_sections(self, prefix):

View File

@@ -38,8 +38,8 @@ class Worker(object):
np.linspace(5000, 9900, 50)) # 100µs accuracy
bins = np.append(bins,
np.linspace(10, 499, 490) * 1000) # 1ms accuracy
bins = np.append(bins, np.linspace(500, 2995, 500) *
1000) # 5ms accuracy
bins = np.append(bins,
np.linspace(500, 2995, 500) * 1000) # 5ms accuracy
bins = np.append(bins, np.linspace(3000, 9990, 700) *
1000) # 10ms accuracy
bins = np.append(bins, np.linspace(10000, 29950, 400) *
@@ -119,7 +119,6 @@ class Worker(object):
class DataPoller(object):
def __init__(self, source, poll_period):
self.poll_period = poll_period
self.source = source
@@ -132,7 +131,6 @@ class DataPoller(object):
class Aggregator(object):
def __init__(self, source, config, verbose_histogram):
self.worker = Worker(config, verbose_histogram)
self.source = source
@@ -144,12 +142,11 @@ class Aggregator(object):
start_time = time.time()
result = {
"ts": ts,
"tagged": {
tag: self.worker.aggregate(data)
for tag, data in by_tag
},
"tagged":
{tag: self.worker.aggregate(data)
for tag, data in by_tag},
"overall": self.worker.aggregate(chunk),
}
logger.debug("Aggregation time: %.2fms",
(time.time() - start_time) * 1000)
logger.debug(
"Aggregation time: %.2fms", (time.time() - start_time) * 1000)
yield result

View File

@@ -23,9 +23,8 @@ class TimeChopper(object):
grouped = chunk.groupby(level=0)
for group_key, group_data in list(grouped):
if group_key in self.cache:
self.cache[group_key] = pd.concat([
self.cache[group_key], group_data
])
self.cache[group_key] = pd.concat(
[self.cache[group_key], group_data])
else:
self.cache[group_key] = group_data
while len(self.cache) > self.cache_size:

View File

@@ -59,8 +59,8 @@ class Plugin(AbstractPlugin):
return ["verbose_histogram"]
def configure(self):
self.aggregator_config = json.loads(resource_string(
__name__, 'config/phout.json').decode('utf8'))
self.aggregator_config = json.loads(
resource_string(__name__, 'config/phout.json').decode('utf8'))
verbose_histogram_option = self.get_option("verbose_histogram", "0")
self.verbose_histogram = (
verbose_histogram_option.lower() == "true") or (
@@ -72,16 +72,15 @@ class Plugin(AbstractPlugin):
if self.reader and self.stats_reader:
pipeline = Aggregator(
TimeChopper(
DataPoller(source=self.reader,
poll_period=1),
cache_size=3),
DataPoller(
source=self.reader, poll_period=1), cache_size=3),
self.aggregator_config,
self.verbose_histogram)
self.drain = Drain(pipeline, self.results)
self.drain.start()
self.stats_drain = Drain(
Chopper(DataPoller(source=self.stats_reader,
poll_period=1)),
Chopper(DataPoller(
source=self.stats_reader, poll_period=1)),
self.stats)
self.stats_drain.start()
else:

View File

@@ -7,7 +7,6 @@ from conftest import MAX_TS, random_split
class TestChopper(object):
def test_one_chunk(self, data):
chopper = TimeChopper([data], 5)
result = list(chopper)
@@ -29,8 +28,9 @@ class TestChopper(object):
chopper = TimeChopper(chunks, 5)
result = list(chopper)
assert len(
result) == MAX_TS, "DataFrame is splitted into proper number of chunks"
result
) == MAX_TS, "DataFrame is splitted into proper number of chunks"
concatinated = pd.concat(r[1] for r in result)
assert len(data) == len(concatinated), "We did not lose anything"
assert np.allclose(concatinated.values,
data.values), "We did not corrupt the data"
assert np.allclose(
concatinated.values, data.values), "We did not corrupt the data"

View File

@@ -9,12 +9,12 @@ from yandextank.plugins.Aggregator.aggregator import Aggregator
from yandextank.plugins.Aggregator.chopper import TimeChopper
from yandextank.plugins.Aggregator.plugin import DataPoller
AGGR_CONFIG = json.loads(resource_string("yandextank.plugins.Aggregator",
'config/phout.json').decode('utf-8'))
AGGR_CONFIG = json.loads(
resource_string("yandextank.plugins.Aggregator", 'config/phout.json')
.decode('utf-8'))
class TestPipeline(object):
def test_partially_reversed_data(self, data):
results_queue = Queue()
chunks = list(random_split(data))
@@ -22,9 +22,8 @@ class TestPipeline(object):
pipeline = Aggregator(
TimeChopper(
DataPoller(source=chunks,
poll_period=0.1),
cache_size=3),
DataPoller(
source=chunks, poll_period=0.1), cache_size=3),
AGGR_CONFIG,
False)
drain = Drain(pipeline, results_queue)
@@ -44,9 +43,8 @@ class TestPipeline(object):
pipeline = Aggregator(
TimeChopper(
DataPoller(source=producer(),
poll_period=0.1),
cache_size=3),
DataPoller(
source=producer(), poll_period=0.1), cache_size=3),
AGGR_CONFIG,
False)
drain = Drain(pipeline, results_queue)

View File

@@ -8,5 +8,5 @@ def test_random_split(data):
assert len(dataframes) > 1
concatinated = pd.concat(dataframes)
assert len(concatinated) == len(data), "We did not lose anything"
assert np.allclose(concatinated.values,
data.values), "We did not corrupt the data"
assert np.allclose(
concatinated.values, data.values), "We did not corrupt the data"

View File

@@ -44,10 +44,11 @@ class Plugin(AbstractPlugin):
process_stdout_file = self.core.mkstemp(".log", "appium_stdout_")
self.core.add_artifact_file(process_stdout_file)
self.process_stdout = open(process_stdout_file, 'w')
self.process = subprocess.Popen(args,
stderr=self.process_stdout,
stdout=self.process_stdout,
close_fds=True)
self.process = subprocess.Popen(
args,
stderr=self.process_stdout,
stdout=self.process_stdout,
close_fds=True)
logger.info("Waiting 5 seconds for Appium to start...")
time.sleep(5)
@@ -61,8 +62,8 @@ class Plugin(AbstractPlugin):
def end_test(self, retcode):
if self.process and self.process.poll() is None:
logger.info("Terminating appium process with PID %s",
self.process.pid)
logger.info(
"Terminating appium process with PID %s", self.process.pid)
self.process.terminate()
if self.process_stdout:
self.process_stdout.close()

View File

@@ -20,14 +20,13 @@ class AvgTimeCriterion(AbstractCriterion):
def __init__(self, autostop, param_str):
AbstractCriterion.__init__(self)
self.seconds_count = 0
self.rt_limit = expand_to_milliseconds(param_str.split(',')[
0])
self.seconds_limit = expand_to_seconds(param_str.split(',')[
1])
self.rt_limit = expand_to_milliseconds(param_str.split(',')[0])
self.seconds_limit = expand_to_seconds(param_str.split(',')[1])
self.autostop = autostop
def notify(self, data, stat):
if (data["overall"]["interval_real"]["total"] / 1000.0 /
if (
data["overall"]["interval_real"]["total"] / 1000.0 /
data["overall"]["interval_real"]["len"]) > self.rt_limit:
if not self.seconds_count:
self.cause_second = (data, stat)
@@ -47,10 +46,10 @@ class AvgTimeCriterion(AbstractCriterion):
return self.RC_TIME
def explain(self):
explanation = ("Average response time higher"
" than %sms for %ss, since %s" %
(self.rt_limit, self.seconds_count,
self.cause_second[0]["ts"]))
explanation = (
"Average response time higher"
" than %sms for %ss, since %s" %
(self.rt_limit, self.seconds_count, self.cause_second[0]["ts"]))
return explanation
def widget_explain(self):
@@ -80,8 +79,7 @@ class HTTPCodesCriterion(AbstractCriterion):
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(param_str.split(',')[
2])
self.seconds_limit = expand_to_seconds(param_str.split(',')[2])
def notify(self, data, stat):
matched_responses = self.count_matched_codes(
@@ -92,8 +90,9 @@ class HTTPCodesCriterion(AbstractCriterion):
"interval_real"]["len"]
else:
matched_responses = 0
logger.debug("HTTP codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.level)
logger.debug(
"HTTP codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.level)
if matched_responses >= self.level:
if not self.seconds_count:
@@ -122,13 +121,15 @@ class HTTPCodesCriterion(AbstractCriterion):
return level_str
def explain(self):
items = (self.codes_mask, self.get_level_str(), self.seconds_count,
self.cause_second[0].get('ts'))
items = (
self.codes_mask, self.get_level_str(), self.seconds_count,
self.cause_second[0].get('ts'))
return "%s codes count higher than %s for %ss, since %s" % items
def widget_explain(self):
items = (self.codes_mask, self.get_level_str(), self.seconds_count,
self.seconds_limit)
items = (
self.codes_mask, self.get_level_str(), self.seconds_count,
self.seconds_limit)
return "HTTP %s>%s for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit
@@ -154,8 +155,7 @@ class NetCodesCriterion(AbstractCriterion):
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(param_str.split(',')[
2])
self.seconds_limit = expand_to_seconds(param_str.split(',')[2])
def notify(self, data, stat):
codes = copy.deepcopy(data["overall"]["net_code"]["count"])
@@ -168,8 +168,9 @@ class NetCodesCriterion(AbstractCriterion):
"interval_real"]["len"]
else:
matched_responses = 0
logger.debug("Net codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.level)
logger.debug(
"Net codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.level)
if matched_responses >= self.level:
if not self.seconds_count:
@@ -198,13 +199,15 @@ class NetCodesCriterion(AbstractCriterion):
return level_str
def explain(self):
items = (self.codes_mask, self.get_level_str(), self.seconds_count,
self.cause_second[0].get("ts"))
items = (
self.codes_mask, self.get_level_str(), self.seconds_count,
self.cause_second[0].get("ts"))
return "%s net codes count higher than %s for %ss, since %s" % items
def widget_explain(self):
items = (self.codes_mask, self.get_level_str(), self.seconds_count,
self.seconds_limit)
items = (
self.codes_mask, self.get_level_str(), self.seconds_count,
self.seconds_limit)
return "Net %s>%s for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit
@@ -225,8 +228,10 @@ class QuantileCriterion(AbstractCriterion):
self.autostop = autostop
def notify(self, data, stat):
quantiles = dict(zip(data["overall"]["interval_real"]["q"]["q"], data[
"overall"]["interval_real"]["q"]["value"]))
quantiles = dict(
zip(
data["overall"]["interval_real"]["q"]["q"], data["overall"][
"interval_real"]["q"]["value"]))
if self.quantile not in quantiles.keys():
logger.warning("No quantile %s in %s", self.quantile, quantiles)
if self.quantile in quantiles.keys() \
@@ -249,13 +254,15 @@ class QuantileCriterion(AbstractCriterion):
return self.RC_TIME
def explain(self):
items = (self.quantile, self.rt_limit, self.seconds_count,
self.cause_second[0].get("ts"))
items = (
self.quantile, self.rt_limit, self.seconds_count,
self.cause_second[0].get("ts"))
return "Percentile %s higher than %sms for %ss, since %s" % items
def widget_explain(self):
items = (self.quantile, self.rt_limit, self.seconds_count,
self.seconds_limit)
items = (
self.quantile, self.rt_limit, self.seconds_count,
self.seconds_limit)
return "%s%% >%sms for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit
@@ -272,13 +279,12 @@ class SteadyCumulativeQuantilesCriterion(AbstractCriterion):
AbstractCriterion.__init__(self)
self.seconds_count = 0
self.quantile_hash = ""
self.seconds_limit = expand_to_seconds(param_str.split(',')[
0])
self.seconds_limit = expand_to_seconds(param_str.split(',')[0])
self.autostop = autostop
def notify(self, data, stat):
quantiles = dict(zip(data["overall"]["q"]["q"], data["overall"]["q"][
"values"]))
quantiles = dict(
zip(data["overall"]["q"]["q"], data["overall"]["q"]["values"]))
quantile_hash = json.dumps(quantiles)
logging.debug("Cumulative quantiles hash: %s", quantile_hash)
if self.quantile_hash == quantile_hash:

View File

@@ -13,7 +13,6 @@ logger = logging.getLogger(__name__)
class WindowCounter(object):
def __init__(self, window_size):
self.window_size = window_size
self.value = 0.0
@@ -60,8 +59,8 @@ class TotalFracTimeCriterion(AbstractCriterion):
def __fail_count(self, data):
ecdf = np.cumsum(data["overall"]["interval_real"]["hist"]["data"])
idx = np.searchsorted(data["overall"]["interval_real"]["hist"]["bins"],
self.rt_limit)
idx = np.searchsorted(
data["overall"]["interval_real"]["hist"]["bins"], self.rt_limit)
if idx == 0:
return ecdf[-1]
elif idx == len(ecdf):
@@ -73,8 +72,8 @@ class TotalFracTimeCriterion(AbstractCriterion):
self.seconds.append((data, stat))
self.fail_counter.push(self.__fail_count(data))
self.total_counter.push(data["overall"]["interval_real"]["len"])
self.total_fail_ratio = (self.fail_counter.value /
self.total_counter.value)
self.total_fail_ratio = (
self.fail_counter.value / self.total_counter.value)
if self.total_fail_ratio >= self.fail_ratio_limit and len(
self.fail_counter) >= self.window_size:
self.cause_second = self.seconds[0]
@@ -88,15 +87,17 @@ class TotalFracTimeCriterion(AbstractCriterion):
return 25
def explain(self):
return ("%.2f%% responses times higher "
"than %sms for %ss since: %s" %
(self.total_fail_ratio * 100, self.rt_limit / 1000,
self.window_size, self.cause_second[0]["ts"]))
return (
"%.2f%% responses times higher "
"than %sms for %ss since: %s" % (
self.total_fail_ratio * 100, self.rt_limit / 1000,
self.window_size, self.cause_second[0]["ts"]))
def widget_explain(self):
return ("%.2f%% times >%sms for %ss" %
(self.total_fail_ratio * 100, self.rt_limit / 1000,
self.window_size), self.total_fail_ratio)
return (
"%.2f%% times >%sms for %ss" % (
self.total_fail_ratio * 100, self.rt_limit / 1000,
self.window_size), self.total_fail_ratio)
class TotalHTTPCodesCriterion(AbstractCriterion):
@@ -122,8 +123,7 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(param_str.split(',')[
2])
self.seconds_limit = expand_to_seconds(param_str.split(',')[2])
def notify(self, data, stat):
matched_responses = self.count_matched_codes(
@@ -134,8 +134,9 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
"interval_real"]["len"] * 100
else:
matched_responses = 1
logger.debug("HTTP codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.level)
logger.debug(
"HTTP codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.level)
self.data.append(matched_responses)
self.second_window.append((data, stat))
if len(self.data) > self.seconds_limit:
@@ -144,11 +145,10 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
queue_len = 1
if self.is_relative:
queue_len = len(self.data)
if (sum(self.data) / queue_len) >= self.level and len(
self.data) >= self.seconds_limit:
if (sum(self.data) / queue_len) >= self.level\
and len(self.data) >= self.seconds_limit: # yapf:disable
self.cause_second = self.second_window[0]
logger.debug(self.explain())
# self.autostop.add_counting(self)
return True
return False
@@ -165,12 +165,15 @@ class TotalHTTPCodesCriterion(AbstractCriterion):
def explain(self):
if self.is_relative:
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return ("%s codes count higher "
"than %s for %ss, ended at: %s" % items)
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return (
"%s codes count higher "
"than %s for %ss, ended at: %s" % items)
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return "%s codes count higher than %s for %ss, since %s" % items
def widget_explain(self):
@@ -204,8 +207,7 @@ class TotalNetCodesCriterion(AbstractCriterion):
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(param_str.split(',')[
2])
self.seconds_limit = expand_to_seconds(param_str.split(',')[2])
def notify(self, data, stat):
codes = data["overall"]["net_code"]["count"].copy()
@@ -216,14 +218,15 @@ class TotalNetCodesCriterion(AbstractCriterion):
if data["overall"]["interval_real"]["len"]:
matched_responses = float(matched_responses) / data["overall"][
"interval_real"]["len"] * 100
logger.debug("Net codes matching mask %s: %s%%/%s",
self.codes_mask, round(matched_responses,
2), self.get_level_str())
logger.debug(
"Net codes matching mask %s: %s%%/%s", self.codes_mask,
round(matched_responses, 2), self.get_level_str())
else:
matched_responses = 1
else:
logger.debug("Net codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.get_level_str())
logger.debug(
"Net codes matching mask %s: %s/%s", self.codes_mask,
matched_responses, self.get_level_str())
self.data.append(matched_responses)
self.second_window.append((data, stat))
@@ -234,11 +237,10 @@ class TotalNetCodesCriterion(AbstractCriterion):
queue_len = 1
if self.is_relative:
queue_len = len(self.data)
if (sum(self.data) / queue_len) >= self.level and len(
self.data) >= self.seconds_limit:
if (sum(self.data) / queue_len) >= self.level\
and len(self.data) >= self.seconds_limit: # yapf:disable
self.cause_second = self.second_window[0]
logger.debug(self.explain())
# self.autostop.add_counting(self)
return True
return False
@@ -255,12 +257,15 @@ class TotalNetCodesCriterion(AbstractCriterion):
def explain(self):
if self.is_relative:
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return ("%s net codes count higher "
"than %s for %ss, since %s" % items)
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return (
"%s net codes count higher "
"than %s for %ss, since %s" % items)
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return "%s net codes count higher than %s for %ss, since %s" % items
def widget_explain(self):
@@ -294,8 +299,7 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(param_str.split(',')[
2])
self.seconds_limit = expand_to_seconds(param_str.split(',')[2])
def notify(self, data, stat):
matched_responses = self.count_matched_codes(
@@ -307,14 +311,15 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
matched_responses = 100 - matched_responses
else:
matched_responses = 1
logger.debug("HTTP codes matching mask not %s: %s/%s",
self.codes_mask, round(matched_responses,
1), self.level)
logger.debug(
"HTTP codes matching mask not %s: %s/%s", self.codes_mask,
round(matched_responses, 1), self.level)
else:
matched_responses = (
data["overall"]["interval_real"]["len"] - matched_responses)
logger.debug("HTTP codes matching mask not %s: %s/%s",
self.codes_mask, matched_responses, self.level)
logger.debug(
"HTTP codes matching mask not %s: %s/%s", self.codes_mask,
matched_responses, self.level)
self.data.append(matched_responses)
self.second_window.append((data, stat))
if len(self.data) > self.seconds_limit:
@@ -324,11 +329,10 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
queue_len = 1
if self.is_relative:
queue_len = len(self.data)
if (sum(self.data) / queue_len) >= self.level and len(
self.data) >= self.seconds_limit:
if (sum(self.data) / queue_len) >= self.level\
and len(self.data) >= self.seconds_limit: # yapf:disable
self.cause_second = self.second_window[0]
logger.debug(self.explain())
# self.autostop.add_counting(self)
return True
return False
@@ -345,12 +349,15 @@ class TotalNegativeHTTPCodesCriterion(AbstractCriterion):
def explain(self):
if self.is_relative:
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return ("Not %s codes count higher "
"than %s for %ss, since %s" % items)
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return (
"Not %s codes count higher "
"than %s for %ss, since %s" % items)
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return "Not %s codes count higher than %s for %ss, since %s" % items
def widget_explain(self):
@@ -384,8 +391,7 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(param_str.split(',')[
2])
self.seconds_limit = expand_to_seconds(param_str.split(',')[2])
def notify(self, data, stat):
codes = data["overall"]["net_code"]["count"].copy()
@@ -399,14 +405,15 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
matched_responses = 100 - matched_responses
else:
matched_responses = 1
logger.debug("Net codes matching mask not %s: %s/%s",
self.codes_mask, round(matched_responses,
1), self.level)
logger.debug(
"Net codes matching mask not %s: %s/%s", self.codes_mask,
round(matched_responses, 1), self.level)
else:
matched_responses = (
data["overall"]["interval_real"]["len"] - matched_responses)
logger.debug("Net codes matching mask not %s: %s/%s",
self.codes_mask, matched_responses, self.level)
logger.debug(
"Net codes matching mask not %s: %s/%s", self.codes_mask,
matched_responses, self.level)
self.data.append(matched_responses)
self.second_window.append((data, stat))
if len(self.data) > self.seconds_limit:
@@ -416,8 +423,8 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
queue_len = 1
if self.is_relative:
queue_len = len(self.data)
if (sum(self.data) / queue_len) >= self.level and len(
self.data) >= self.seconds_limit:
if (sum(self.data) / queue_len) >= self.level \
and len(self.data) >= self.seconds_limit: # yapf:disable
self.cause_second = self.second_window[0]
logger.debug(self.explain())
return True
@ -436,12 +443,15 @@ class TotalNegativeNetCodesCriterion(AbstractCriterion):
def explain(self):
if self.is_relative:
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return ("Not %s codes count higher "
"than %s for %ss, since %s" % items)
items = (self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return (
"Not %s codes count higher "
"than %s for %ss, since %s" % items)
items = (
self.codes_mask, self.get_level_str(), self.seconds_limit,
self.cause_second[0]["ts"])
return "Not %s codes count higher than %s for %ss, since %s" % items
def widget_explain(self):
@ -471,8 +481,7 @@ class TotalHTTPTrendCriterion(AbstractCriterion):
self.tangents.append(0)
self.last = 0
self.seconds_limit = expand_to_seconds(param_str.split(',')[
1])
self.seconds_limit = expand_to_seconds(param_str.split(',')[1])
self.measurement_error = float()
def notify(self, data, stat):
@ -491,8 +500,9 @@ class TotalHTTPTrendCriterion(AbstractCriterion):
self.measurement_error = self.calc_measurement_error(self.tangents)
self.total_tan = float(sum(self.tangents) / len(self.tangents))
logger.debug("Last trend for http codes %s: %.2f +/- %.2f",
self.codes_mask, self.total_tan, self.measurement_error)
logger.debug(
"Last trend for http codes %s: %.2f +/- %.2f", self.codes_mask,
self.total_tan, self.measurement_error)
if self.total_tan + self.measurement_error < 0:
self.cause_second = self.second_window[0]
@ -521,14 +531,17 @@ class TotalHTTPTrendCriterion(AbstractCriterion):
return 30
def explain(self):
items = (self.codes_mask, self.total_tan, self.measurement_error,
self.seconds_limit, self.cause_second[0]["ts"])
return ("Last trend for %s http codes "
"is %.2f +/- %.2f for %ss, since %s" % items)
items = (
self.codes_mask, self.total_tan, self.measurement_error,
self.seconds_limit, self.cause_second[0]["ts"])
return (
"Last trend for %s http codes "
"is %.2f +/- %.2f for %ss, since %s" % items)
def widget_explain(self):
items = (self.codes_mask, self.total_tan, self.measurement_error,
self.seconds_limit)
items = (
self.codes_mask, self.total_tan, self.measurement_error,
self.seconds_limit)
return ("HTTP(%s) trend is %.2f +/- %.2f < 0 for %ss" % items, 1.0)
@ -543,6 +556,7 @@ class QuantileOfSaturationCriterion(AbstractCriterion):
def __init__(self, autostop, param_str):
AbstractCriterion.__init__(self)
raise NotImplementedError
# self.autostop = autostop
# self.data = deque()
# self.second_window = deque()

View File

@ -51,8 +51,8 @@ class Plugin(AbstractPlugin, AggregateResultListener):
aggregator = self.core.get_plugin_of_type(AggregatorPlugin)
aggregator.add_result_listener(self)
self.criterion_str = " ".join(self.get_option("autostop", '').split(
"\n"))
self.criterion_str = " ".join(
self.get_option("autostop", '').split("\n"))
self._stop_report_path = os.path.join(
self.core.artifacts_dir,
self.get_option("report_file", 'autostop_report.txt'))
@ -92,8 +92,9 @@ class Plugin(AbstractPlugin, AggregateResultListener):
def is_test_finished(self):
if self.cause_criterion:
self.log.info("Autostop criterion requested test stop: %s",
self.cause_criterion.explain())
self.log.info(
"Autostop criterion requested test stop: %s",
self.cause_criterion.explain())
return self.cause_criterion.get_rc()
else:
return -1
@ -107,8 +108,8 @@ class Plugin(AbstractPlugin, AggregateResultListener):
for criterion_class in self.custom_criterions:
if criterion_class.get_type_string() == type_str:
return criterion_class(self, parsed[1])
raise ValueError("Unsupported autostop criterion type: %s" %
criterion_str)
raise ValueError(
"Unsupported autostop criterion type: %s" % criterion_str)
def on_aggregated_data(self, data, stat):
self.counting = []
@ -116,8 +117,7 @@ class Plugin(AbstractPlugin, AggregateResultListener):
for criterion_text, criterion in self._criterions.iteritems():
if criterion.notify(data, stat):
self.log.debug(
"Autostop criterion requested test stop: %s",
criterion)
"Autostop criterion requested test stop: %s", criterion)
self.cause_criterion = criterion
open(self._stop_report_path, 'w').write(criterion_text)
self.core.add_artifact_file(self._stop_report_path)

View File

@ -22,10 +22,13 @@ class Plugin(AbstractPlugin):
self.default_target = None
self.device_id = None
self.cmds = {
"enable_full_log": "adb %s shell dumpsys batterystats --enable full-wake-history",
"disable_full_log": "adb %s shell dumpsys batterystats --disable full-wake-history",
"enable_full_log":
"adb %s shell dumpsys batterystats --enable full-wake-history",
"disable_full_log":
"adb %s shell dumpsys batterystats --disable full-wake-history",
"reset": "adb %s shell dumpsys batterystats --reset",
"dump": "adb %s shell dumpsys batterystats"}
"dump": "adb %s shell dumpsys batterystats"
}
def get_available_options(self):
return ["device_id"]
@ -57,8 +60,7 @@ class Plugin(AbstractPlugin):
try:
logger.debug('dumping battery stats')
dump = subprocess.Popen(
self.cmds['dump'],
stdout=subprocess.PIPE,
self.cmds['dump'], stdout=subprocess.PIPE,
shell=True).communicate()[0]
out = subprocess.check_output(
self.cmds['disable_full_log'], shell=True)

View File

@ -40,4 +40,7 @@ def scenario_2(missile, marker, measure):
SCENARIOS module variable is used by Tank to choose the scenario to
shoot with. For each missile Tank will look up missile marker in this dict.
"""
SCENARIOS = {"scenario_1": scenario_1, "scenario_2": scenario_1, }
SCENARIOS = {
"scenario_1": scenario_1,
"scenario_2": scenario_1,
}
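
For context, a rough sketch of how a scenario gun resolves a marker through this dict; the shoot() wrapper and the None measure argument are hypothetical, only the SCENARIOS lookup itself comes from this file:

def shoot(missile, marker):
    # look the marker up in SCENARIOS; an unknown marker is a user error
    scenario = SCENARIOS.get(marker)
    if scenario is None:
        raise RuntimeError("Scenario not found: %s" % marker)
    # 'measure' is passed as None here only to keep the sketch short
    return scenario(missile, marker, None)
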

View File

@ -3,7 +3,6 @@ log = logging.getLogger(__name__)
class LoadTest(object):
def __init__(self, gun):
self.gun = gun

View File

@ -16,7 +16,6 @@ requests.packages.urllib3.disable_warnings()
class AbstractGun(AbstractPlugin):
def __init__(self, core):
super(AbstractGun, self).__init__(core)
self.results = None
@ -49,8 +48,8 @@ class AbstractGun(AbstractPlugin):
raise
finally:
if data_item.get("interval_real") is None:
data_item["interval_real"] = int((time.time() - start_time) *
1e6)
data_item["interval_real"] = int(
(time.time() - start_time) * 1e6)
self.results.put(data_item, timeout=1)
@ -152,8 +151,8 @@ class CustomGun(AbstractGun):
module_name = self.get_option("module_name")
fp, pathname, description = imp.find_module(module_name, module_path)
try:
self.module = imp.load_module(module_name, fp, pathname,
description)
self.module = imp.load_module(
module_name, fp, pathname, description)
finally:
if fp:
fp.close()
@ -186,8 +185,8 @@ class ScenarioGun(AbstractGun):
module_name = self.get_option("module_name")
fp, pathname, description = imp.find_module(module_name, module_path)
try:
self.module = imp.load_module(module_name, fp, pathname,
description)
self.module = imp.load_module(
module_name, fp, pathname, description)
finally:
if fp:
fp.close()
@ -232,8 +231,8 @@ class UltimateGun(AbstractGun):
# it is imported to be sure Python won't be able to cache it
#
try:
self.module = imp.load_module("%s_%d" % (module_name, time.time()),
fp, pathname, description)
self.module = imp.load_module(
"%s_%d" % (module_name, time.time()), fp, pathname, description)
finally:
if fp:
fp.close()

View File

@ -62,17 +62,18 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
if gun_type in self.gun_classes:
self.gun = self.gun_classes[gun_type](self.core)
else:
raise NotImplementedError('No such gun type implemented: "%s"' %
gun_type)
raise NotImplementedError(
'No such gun type implemented: "%s"' % gun_type)
cached_stpd_option = self.get_option("cached_stpd", '0')
if cached_stpd_option == '1':
cached_stpd = True
else:
cached_stpd = False
self.bfg = BFG(gun=self.gun,
instances=self.stepper_wrapper.instances,
stpd_filename=self.stepper_wrapper.stpd,
cached_stpd=cached_stpd)
self.bfg = BFG(
gun=self.gun,
instances=self.stepper_wrapper.instances,
stpd_filename=self.stepper_wrapper.stpd,
cached_stpd=cached_stpd)
aggregator = None
try:
aggregator = self.core.get_plugin_of_type(AggregatorPlugin)

View File

@ -14,11 +14,10 @@ def records_to_df(records):
def _expand_steps(steps):
return list(itt.chain(*[[rps] * int(duration) for rps, duration in steps]))
return list(itt.chain(* [[rps] * int(duration) for rps, duration in steps]))
class BfgReader(object):
def __init__(self, results):
self.buffer = ""
self.stat_buffer = ""
@ -43,7 +42,6 @@ class BfgReader(object):
class BfgStatsReader(object):
def __init__(self, instance_counter, steps):
self.closed = False
self.last_ts = 0
@ -59,9 +57,13 @@ class BfgStatsReader(object):
reqps = 0
if offset >= 0 and offset < len(self.steps):
reqps = self.steps[offset]
yield [{'ts': cur_ts,
'metrics': {'instances': self.instance_counter.value,
'reqps': reqps}}]
yield [{
'ts': cur_ts,
'metrics': {
'instances': self.instance_counter.value,
'reqps': reqps
}
}]
self.last_ts = cur_ts
else:
yield []
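
The offset lookup above leans on the _expand_steps() helper defined earlier in this file; a short worked example with an invented schedule:

# (10 rps for 2 s, then 20 rps for 3 s) flattens to one value per second
steps = _expand_steps([(10, 2), (20, 3)])   # -> [10, 10, 20, 20, 20]
offset = 3                                  # seconds since start_time
reqps = steps[offset] if 0 <= offset < len(steps) else 0   # -> 20
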

View File

@ -33,8 +33,7 @@ class BfgInfoWidget(AbstractInfoWidget):
res += str(self.instances)
res += "\nPlanned requests: %s for %s\nActual responses: " % (
self.planned,
datetime.timedelta(seconds=self.planned_rps_duration))
self.planned, datetime.timedelta(seconds=self.planned_rps_duration))
if not self.planned == self.RPS:
res += screen.markup.YELLOW + str(self.RPS) + screen.markup.RESET
else:

View File

@ -16,13 +16,15 @@ class BFG(object):
"""
def __init__(self, gun, instances, stpd_filename, cached_stpd=False):
logger.info("""
logger.info(
"""
BFG using stpd from {stpd_filename}
Instances: {instances}
Gun: {gun.__class__.__name__}
""".format(stpd_filename=stpd_filename,
instances=instances,
gun=gun, ))
""".format(
stpd_filename=stpd_filename,
instances=instances,
gun=gun, ))
self.instances = int(instances)
self.instance_counter = mp.Value('i')
self.results = mp.Queue()
@ -60,9 +62,12 @@ Gun: {gun.__class__.__name__}
Say the workers to finish their jobs and quit.
"""
self.quit.set()
while sorted([self.pool[i].is_alive()
for i in xrange(len(self.pool))])[-1]:
# yapf:disable
while sorted([
self.pool[i].is_alive()
for i in xrange(len(self.pool))])[-1]:
time.sleep(1)
# yapf:enable
try:
while not self.task_queue.empty():
self.task_queue.get(timeout=0.1)
@ -94,19 +99,20 @@ Gun: {gun.__class__.__name__}
else:
continue
workers_count = self.instances
logger.info("Feeded all data. Publishing %d killer tasks" %
(workers_count))
logger.info(
"Feeded all data. Publishing %d killer tasks" % (workers_count))
retry_delay = 1
for _ in range(5):
try:
[self.task_queue.put(None,
timeout=1)
for _ in xrange(0, workers_count)]
[
self.task_queue.put(None, timeout=1)
for _ in xrange(0, workers_count)
]
break
except Full:
logger.debug("Couldn't post killer tasks"
" because queue is full. Retrying in %ss",
retry_delay)
logger.debug(
"Couldn't post killer tasks"
" because queue is full. Retrying in %ss", retry_delay)
time.sleep(retry_delay)
retry_delay *= 2
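
Each None pushed above is a poison pill: one per worker, consumed exactly once. A minimal sketch of the consuming side, assuming the same task queue; handle() is a hypothetical stand-in for the actual gun shot:

def worker_loop(task_queue):
    while True:
        task = task_queue.get()
        if task is None:     # killer task: this worker shuts down
            return
        handle(task)         # stand-in for shooting the task
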
@ -160,8 +166,7 @@ Gun: {gun.__class__.__name__}
logger.debug("Empty queue. Exiting process")
return
except Full:
logger.warning(
"Couldn't put to result queue because it's full")
logger.warning("Couldn't put to result queue because it's full")
except Exception:
logger.exception("Bfg shoot exception")

View File

@ -40,8 +40,8 @@ class Plugin(AbstractPlugin, AggregateResultListener):
]
def configure(self):
self.info_panel_width = self.get_option("info_panel_width",
self.info_panel_width)
self.info_panel_width = self.get_option(
"info_panel_width", self.info_panel_width)
self.short_only = int(self.get_option("short_only", '0'))
if not int(self.get_option("disable_all_colors", '0')):
self.console_markup = RealConsoleMarkup()
@ -97,15 +97,17 @@ class Plugin(AbstractPlugin, AggregateResultListener):
if self.short_only:
overall = data.get('overall')
quantiles = dict(zip(overall['interval_real']['q']['q'], overall[
'interval_real']['q']['value']))
quantiles = dict(
zip(
overall['interval_real']['q']['q'], overall['interval_real']
['q']['value']))
info = (
"ts:{ts}\tRPS:{rps}\tavg:{avg_rt:.2f}\t"
"min:{min:.2f}\tmax:{q100:.2f}\tq95:{q95:.2f}\t").format(
ts=data.get('ts'),
rps=overall['interval_real']['len'],
avg_rt=float(overall['interval_real']['total']) / overall[
'interval_real']['len'] / 1000.0,
avg_rt=float(overall['interval_real']['total']) /
overall['interval_real']['len'] / 1000.0,
min=overall['interval_real']['min'] / 1000.0,
q100=quantiles[100] / 1000,
q95=quantiles[95] / 1000)
@ -120,6 +122,7 @@ class Plugin(AbstractPlugin, AggregateResultListener):
else:
self.screen.add_info_widget(widget)
# ======================================================
@ -147,12 +150,15 @@ class RealConsoleMarkup(object):
def clean_markup(self, orig_str):
''' clean markup from string '''
for val in [self.YELLOW, self.RED, self.RESET, self.CYAN,
self.BG_MAGENTA, self.WHITE, self.BG_GREEN, self.GREEN,
self.BG_BROWN, self.RED_DARK, self.MAGENTA, self.BG_CYAN]:
for val in [
self.YELLOW, self.RED, self.RESET, self.CYAN, self.BG_MAGENTA,
self.WHITE, self.BG_GREEN, self.GREEN, self.BG_BROWN,
self.RED_DARK, self.MAGENTA, self.BG_CYAN
]:
orig_str = orig_str.replace(val, '')
return orig_str
# ======================================================
# FIXME: 3 better way to have it?
@ -177,4 +183,5 @@ class NoConsoleMarkup(RealConsoleMarkup):
BG_BROWN = ''
BG_CYAN = ''
# ======================================================

View File

@ -22,8 +22,8 @@ def get_terminal_size():
Helper to get console size
'''
try:
sizes = struct.unpack('hh', fcntl.ioctl(file_d, termios.TIOCGWINSZ,
'1234'))
sizes = struct.unpack(
'hh', fcntl.ioctl(file_d, termios.TIOCGWINSZ, '1234'))
except Exception:
sizes = default_size
return sizes
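
For reference, the ioctl above can be exercised standalone: TIOCGWINSZ fills a winsize struct whose first two shorts are rows and columns, and fcntl.ioctl() returns them packed in the 4-byte '1234' buffer. A hedged sketch, assuming a POSIX tty on stdout:

import fcntl
import struct
import sys
import termios

try:
    rows, cols = struct.unpack(
        'hh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234'))
except IOError:    # stdout is not a tty, e.g. redirected to a file
    rows, cols = 24, 80
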
@ -85,8 +85,8 @@ class Screen(object):
if len(right_line) > self.right_panel_width:
right_line_plain = self.markup.clean_markup(right_line)
if len(right_line_plain) > self.right_panel_width:
right_line = right_line[
: self.right_panel_width] + self.markup.RESET
right_line = right_line[:self.
right_panel_width] + self.markup.RESET
return right_line
def __render_left_panel(self):
@ -124,8 +124,8 @@ class Screen(object):
def render_screen(self):
''' Main method to render screen view '''
self.term_width, self.term_height = get_terminal_size()
self.log.debug("Terminal size: %sx%s", self.term_width,
self.term_height)
self.log.debug(
"Terminal size: %sx%s", self.term_width, self.term_height)
self.right_panel_width = int(
(self.term_width - len(self.RIGHT_PANEL_SEPARATOR)) *
(float(self.info_panel_percent) / 100)) - 1
@ -135,14 +135,14 @@ class Screen(object):
else:
self.right_panel_width = 0
self.left_panel_width = self.term_width - 1
self.log.debug("Left/right panels width: %s/%s", self.left_panel_width,
self.right_panel_width)
self.log.debug(
"Left/right panels width: %s/%s", self.left_panel_width,
self.right_panel_width)
widget_output = []
if self.right_panel_width:
widget_output = []
self.log.debug("There are %d info widgets" %
len(self.info_widgets))
self.log.debug("There are %d info widgets" % len(self.info_widgets))
for index, widget in sorted(
self.info_widgets.iteritems(),
key=lambda item: (item[1].get_index(), item[0])):
@ -164,11 +164,11 @@ class Screen(object):
left_line_plain = self.markup.clean_markup(left_line)
if len(left_line) > self.left_panel_width:
if len(left_line_plain) > self.left_panel_width:
left_line = left_line[
: self.left_panel_width] + self.markup.RESET
left_line = left_line[:self.
left_panel_width] + self.markup.RESET
left_line += (' ' *
(self.left_panel_width - len(left_line_plain)))
left_line += (
' ' * (self.left_panel_width - len(left_line_plain)))
line += left_line
else:
line += ' ' * self.left_panel_width
@ -223,6 +223,7 @@ class AbstractBlock:
'''
raise RuntimeError("Abstract method needs to be overridden")
# ======================================================
@ -270,8 +271,9 @@ class CurrentTimesDistBlock(AbstractBlock):
def add_second(self, data):
self.current_rps = data["overall"]["interval_real"]["len"]
self.hist = zip(data["overall"]["interval_real"]["hist"]["bins"],
data["overall"]["interval_real"]["hist"]["data"], )
self.hist = zip(
data["overall"]["interval_real"]["hist"]["bins"],
data["overall"]["interval_real"]["hist"]["data"], )
def render(self):
self.lines = []
@ -291,6 +293,7 @@ class CurrentTimesDistBlock(AbstractBlock):
self.width = max(self.width, len(self.lines[0]))
# ======================================================
@ -323,8 +326,8 @@ class CurrentHTTPBlock(AbstractBlock):
]
for code, count in sorted(self.times_dist.iteritems()):
line = self.format_line(code, count)
self.width = max(self.width,
len(self.screen.markup.clean_markup(line)))
self.width = max(
self.width, len(self.screen.markup.clean_markup(line)))
self.lines.append(line)
def format_line(self, code, count):
@ -358,6 +361,7 @@ class CurrentHTTPBlock(AbstractBlock):
return left_line
# ======================================================
@ -412,10 +416,11 @@ class CurrentNetBlock(AbstractBlock):
]
for code, count in sorted(self.times_dist.iteritems()):
line = self.format_line(code, count)
self.width = max(self.width,
len(self.screen.markup.clean_markup(line)))
self.width = max(
self.width, len(self.screen.markup.clean_markup(line)))
self.lines.append(line)
# ======================================================
@ -429,10 +434,12 @@ class CurrentQuantilesBlock(AbstractBlock):
self.quantiles = {}
def add_second(self, data):
self.quantiles = {k: v
for k, v in zip(data["overall"]["interval_real"][
"q"]["q"], data["overall"]["interval_real"]["q"][
"value"])}
self.quantiles = {
k: v
for k, v in zip(
data["overall"]["interval_real"]["q"]["q"], data["overall"][
"interval_real"]["q"]["value"])
}
def render(self):
self.lines = []
@ -442,10 +449,12 @@ class CurrentQuantilesBlock(AbstractBlock):
self.lines.append(line)
self.lines.reverse()
self.lines = [self.screen.markup.WHITE + 'Current Percentiles:' +
self.screen.markup.RESET] + self.lines
self.width = max(self.width,
len(self.screen.markup.clean_markup(self.lines[0])))
self.lines = [
self.screen.markup.WHITE + 'Current Percentiles:' +
self.screen.markup.RESET
] + self.lines
self.width = max(
self.width, len(self.screen.markup.clean_markup(self.lines[0])))
def __format_line(self, quan, timing):
''' Format line '''
@ -455,6 +464,7 @@ class CurrentQuantilesBlock(AbstractBlock):
left_line = tpl % data
return left_line
# ======================================================
@ -474,22 +484,24 @@ class AnswSizesBlock(AbstractBlock):
def render(self):
self.lines = [self.header]
if self.count:
self.lines.append(" Avg Request: %d bytes" %
(self.sum_out / self.count))
self.lines.append(" Avg Response: %d bytes" %
(self.sum_in / self.count))
self.lines.append(
" Avg Request: %d bytes" % (self.sum_out / self.count))
self.lines.append(
" Avg Response: %d bytes" % (self.sum_in / self.count))
self.lines.append("")
if self.cur_count:
self.lines.append(" Last Avg Request: %d bytes" %
(self.cur_out / self.cur_count))
self.lines.append(" Last Avg Response: %d bytes" %
(self.cur_in / self.cur_count))
self.lines.append(
" Last Avg Request: %d bytes" %
(self.cur_out / self.cur_count))
self.lines.append(
" Last Avg Response: %d bytes" %
(self.cur_in / self.cur_count))
else:
self.lines.append("")
self.lines.append("")
for line in self.lines:
self.width = max(self.width,
len(self.screen.markup.clean_markup(line)))
self.width = max(
self.width, len(self.screen.markup.clean_markup(line)))
def add_second(self, data):
@ -501,6 +513,7 @@ class AnswSizesBlock(AbstractBlock):
self.sum_in += self.cur_in
self.sum_out += self.cur_out
# ======================================================
@ -547,28 +560,41 @@ class AvgTimesBlock(AbstractBlock):
self.screen.markup.WHITE + self.header + self.screen.markup.RESET
]
if self.last_count:
len_all = str(len(str(max(
[self.all_connect, self.all_latency, self.all_overall,
self.all_receive, self.all_send]))))
len_last = str(len(str(max(
[self.last_connect, self.last_latency, self.last_overall,
self.last_receive, self.last_send]))))
len_all = str(
len(
str(
max([
self.all_connect, self.all_latency,
self.all_overall, self.all_receive, self.all_send
]))))
len_last = str(
len(
str(
max([
self.last_connect, self.last_latency,
self.last_overall, self.last_receive, self.last_send
]))))
tpl = "%" + len_all + "d / %" + len_last + "d"
self.lines.append(" Overall: " + tpl % (float(
self.all_overall) / self.all_count, float(self.last_overall) /
self.last_count))
self.lines.append(" Connect: " + tpl % (float(
self.all_connect) / self.all_count, float(self.last_connect) /
self.last_count))
self.lines.append(" Send: " + tpl % (float(
self.all_send) / self.all_count, float(self.last_send) /
self.last_count))
self.lines.append(" Latency: " + tpl % (float(
self.all_latency) / self.all_count, float(self.last_latency) /
self.last_count))
self.lines.append(" Receive: " + tpl % (float(
self.all_receive) / self.all_count, float(self.last_receive) /
self.last_count))
self.lines.append(
" Overall: " + tpl % (
float(self.all_overall) / self.all_count, float(
self.last_overall) / self.last_count))
self.lines.append(
" Connect: " + tpl % (
float(self.all_connect) / self.all_count, float(
self.last_connect) / self.last_count))
self.lines.append(
" Send: " + tpl % (
float(self.all_send) / self.all_count, float(
self.last_send) / self.last_count))
self.lines.append(
" Latency: " + tpl % (
float(self.all_latency) / self.all_count, float(
self.last_latency) / self.last_count))
self.lines.append(
" Receive: " + tpl % (
float(self.all_receive) / self.all_count, float(
self.last_receive) / self.last_count))
else:
self.lines.append("")
self.lines.append("")
@ -576,8 +602,9 @@ class AvgTimesBlock(AbstractBlock):
self.lines.append("")
self.lines.append("")
for line in self.lines:
self.width = max(self.width,
len(self.screen.markup.clean_markup(line)))
self.width = max(
self.width, len(self.screen.markup.clean_markup(line)))
# ======================================================
@ -610,14 +637,13 @@ class CasesBlock(AbstractBlock):
self.screen.markup.WHITE + self.header + self.screen.markup.RESET
]
total_count = sum(case[0] for case in self.cases.values())
tpl = " %s: %" + str(len(str(
total_count))) + "d %5.2f%% / avg %.1f ms"
tpl = " %s: %" + str(len(str(total_count))) + "d %5.2f%% / avg %.1f ms"
for name, (count, resp_time) in sorted(self.cases.iteritems()):
line = tpl % (" " * (self.max_case_len - len(name)) + name, count,
100 * float(count) / total_count,
float(resp_time) / count)
line = tpl % (
" " * (self.max_case_len - len(name)) + name, count,
100 * float(count) / total_count, float(resp_time) / count)
self.lines.append(line)
for line in self.lines:
self.width = max(self.width,
len(self.screen.markup.clean_markup(line)))
self.width = max(
self.width, len(self.screen.markup.clean_markup(line)))

View File

@ -44,8 +44,10 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
return __file__
def get_available_options(self):
return ["jmx", "args", "jmeter_path", "buffer_size",
"buffered_seconds", "exclude_markers"]
return [
"jmx", "args", "jmeter_path", "buffer_size", "buffered_seconds",
"exclude_markers"
]
def configure(self):
self.original_jmx = self.get_option("jmx")
@ -57,19 +59,19 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
self.jmeter_log = self.core.mkstemp('.log', 'jmeter_')
self.jmeter_ver = float(self.get_option('jmeter_ver', '3.0'))
self.ext_log = self.get_option(
'extended_log', self.get_option(
'ext_log', 'none'))
'extended_log', self.get_option('ext_log', 'none'))
if self.ext_log not in self.ext_levels:
self.ext_log = 'none'
if self.ext_log != 'none':
self.ext_log_file = self.core.mkstemp('.jtl', 'jmeter_ext_')
self.core.add_artifact_file(self.ext_log_file)
self.jmeter_buffer_size = int(self.get_option(
'buffer_size', self.get_option('buffered_seconds', '3')))
self.jmeter_buffer_size = int(
self.get_option(
'buffer_size', self.get_option('buffered_seconds', '3')))
self.core.add_artifact_file(self.jmeter_log, True)
self.exclude_markers = set(filter(
(lambda marker: marker != ''), self.get_option('exclude_markers',
[]).split(' ')))
self.exclude_markers = set(
filter((lambda marker: marker != ''),
self.get_option('exclude_markers', []).split(' ')))
self.jmx = self.__add_jmeter_components(
self.original_jmx, self.jtl_file, self._get_variables())
self.core.add_artifact_file(self.jmx)
@ -79,10 +81,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
self.jmeter_stderr = open(jmeter_stderr_file, 'w')
def prepare_test(self):
self.args = [self.jmeter_path, "-n", "-t", self.jmx, '-j',
self.jmeter_log,
'-Jjmeter.save.saveservice.default_delimiter=\\t',
'-Jjmeter.save.saveservice.connect_time=true']
self.args = [
self.jmeter_path, "-n", "-t", self.jmx, '-j', self.jmeter_log,
'-Jjmeter.save.saveservice.default_delimiter=\\t',
'-Jjmeter.save.saveservice.connect_time=true'
]
self.args += splitstring(self.user_args)
aggregator = None
@ -108,8 +111,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
aggregator.add_result_listener(widget)
def start_test(self):
logger.info("Starting %s with arguments: %s", self.jmeter_path,
self.args)
logger.info(
"Starting %s with arguments: %s", self.jmeter_path, self.args)
try:
self.jmeter_process = subprocess.Popen(
self.args,
@ -117,8 +120,7 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
preexec_fn=os.setsid,
close_fds=True,
stdout=self.jmeter_stderr,
stderr=self.jmeter_stderr
)
stderr=self.jmeter_stderr)
except OSError:
logger.debug(
"Unable to start JMeter process. Args: %s, Executable: %s",
@ -126,8 +128,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
self.jmeter_path,
exc_info=True)
raise RuntimeError(
"Unable to access to JMeter executable file or it does not exist: %s" %
self.jmeter_path)
"Unable to access to JMeter executable file or it does not exist: %s"
% self.jmeter_path)
self.start_time = time.time()
def is_test_finished(self):
@ -151,8 +153,9 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
def end_test(self, retcode):
if self.jmeter_process:
logger.info("Terminating jmeter process group with PID %s",
self.jmeter_process.pid)
logger.info(
"Terminating jmeter process group with PID %s",
self.jmeter_process.pid)
try:
os.killpg(self.jmeter_process.pid, signal.SIGTERM)
except OSError as exc:
@ -191,16 +194,20 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
if self.ext_log in ['errors', 'all']:
level_map = {'errors': 'true', 'all': 'false'}
tpl_resource = 'jmeter_writer_ext.xml'
tpl_args = {'jtl': self.jtl_file, 'udv': udv,
'ext_log': self.ext_log_file,
'ext_level': level_map[self.ext_log],
'save_connect': save_connect}
tpl_args = {
'jtl': self.jtl_file,
'udv': udv,
'ext_log': self.ext_log_file,
'ext_level': level_map[self.ext_log],
'save_connect': save_connect
}
else:
tpl_resource = 'jmeter_writer.xml'
tpl_args = {
'jtl': self.jtl_file,
'udv': udv,
'save_connect': save_connect}
'save_connect': save_connect
}
tpl = resource_string(__name__, 'config/' + tpl_resource)
@ -258,7 +265,8 @@ class JMeterInfoWidget(AbstractInfoWidget, AggregateResultListener):
template += " Duration: %s\n"
template += "Active Threads: %s\n"
template += " Responses/s: %s"
data = (os.path.basename(self.jmeter.original_jmx), duration,
self.active_threads, self.RPS)
data = (
os.path.basename(self.jmeter.original_jmx), duration,
self.active_threads, self.RPS)
return template % data

View File

@ -59,8 +59,7 @@ def _exc_to_http(param1):
int(param1)
except:
logger.error(
"JMeter wrote some strange data into codes column: %s",
param1)
"JMeter wrote some strange data into codes column: %s", param1)
else:
return int(param1)
@ -113,19 +112,14 @@ def fix_latency(row):
# timeStamp,elapsed,label,responseCode,success,bytes,grpThreads,allThreads,Latency
def string_to_df(data):
chunk = pd.read_csv(
StringIO(data),
sep='\t',
names=jtl_columns,
dtype=jtl_types)
StringIO(data), sep='\t', names=jtl_columns, dtype=jtl_types)
chunk["receive_ts"] = (chunk["send_ts"] + chunk['interval_real']) / 1000.0
chunk['receive_sec'] = chunk["receive_ts"].astype(np.int64)
chunk['interval_real'] = chunk["interval_real"] * 1000 # convert to µs
chunk.set_index(['receive_sec'], inplace=True)
l = len(chunk)
chunk['connect_time'] = (
chunk['connect_time'].fillna(0) *
1000).astype(
np.int64)
chunk['connect_time'] = (chunk['connect_time'].fillna(0) *
1000).astype(np.int64)
chunk['latency'] = chunk['latency'] * 1000
chunk['latency'] = chunk.apply(fix_latency, axis=1)
chunk['send_time'] = np.zeros(l)
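
The unit bookkeeping above is easy to get wrong, so a worked example with one invented JTL row: JMeter logs timestamps and durations in milliseconds, while the tank pipeline keeps timings in microseconds and buckets rows by whole seconds:

send_ts_ms = 1483094400123    # timeStamp column: ms since epoch (invented)
elapsed_ms = 250              # elapsed column: response time in ms
receive_ts = (send_ts_ms + elapsed_ms) / 1000.0   # 1483094400.373 seconds
receive_sec = int(receive_ts)                     # 1483094400, the bucket key
interval_real_us = elapsed_ms * 1000              # 250000 µs for aggregation
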
@ -139,7 +133,6 @@ def string_to_df(data):
class JMeterStatAggregator(object):
def __init__(self, source):
self.worker = agg.Worker({"allThreads": ["mean"]}, False)
self.source = source
@ -147,16 +140,19 @@ class JMeterStatAggregator(object):
def __iter__(self):
for ts, chunk in self.source:
stats = self.worker.aggregate(chunk)
yield [{'ts': ts,
'metrics': {'instances': stats['allThreads']['mean'],
'reqps': 0}}]
yield [{
'ts': ts,
'metrics': {
'instances': stats['allThreads']['mean'],
'reqps': 0
}
}]
def close(self):
pass
class JMeterReader(object):
def __init__(self, filename):
self.buffer = ""
self.stat_buffer = ""
@ -165,8 +161,8 @@ class JMeterReader(object):
self.agg_finished = False
self.closed = False
self.stat_queue = q.Queue()
self.stats_reader = JMeterStatAggregator(TimeChopper(
self._read_stat_queue(), 3))
self.stats_reader = JMeterStatAggregator(
TimeChopper(self._read_stat_queue(), 3))
def _read_stat_queue(self):
while not self.closed:

View File

@ -24,20 +24,17 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
def configure(self):
self.monitoring_logger = self.create_file_logger(
'monitoring', self.get_option(
'monitoring_log', 'monitoring.log'))
'monitoring', self.get_option('monitoring_log', 'monitoring.log'))
self.aggregator_data_logger = self.create_file_logger(
'aggregator_data', self.get_option('test_data_log', 'test_data.log'))
'aggregator_data',
self.get_option('test_data_log', 'test_data.log'))
self.core.job.subscribe_plugin(self)
def create_file_logger(self, logger_name, file_name, formatter=None):
loggr = logging.getLogger(logger_name)
loggr.setLevel(logging.INFO)
handler = logging.FileHandler(
os.path.join(
self.core.artifacts_dir,
file_name),
mode='w')
os.path.join(self.core.artifacts_dir, file_name), mode='w')
handler.setLevel(logging.INFO)
if formatter:
handler.setFormatter(formatter)
@ -51,14 +48,19 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
@stats: stats about gun
"""
self.aggregator_data_logger.info(
json.dumps({'data': data, 'stats': stats}))
json.dumps({
'data': data,
'stats': stats
}))
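
Each call above writes one JSON object per line, so the artifact can be replayed with a line-oriented reader. A sketch, assuming the default 'test_data.log' name from the test_data_log option above:

import json

with open('test_data.log') as artifact:
    for line in artifact:
        record = json.loads(line)
        # the two top-level keys come from the json.dumps() call above
        data, stats = record['data'], record['stats']
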
def monitoring_data(self, data_list):
if self.is_telegraf:
self.monitoring_logger.info(json.dumps(data_list))
else:
[self.monitoring_logger.info(data.strip())
for data in data_list if data]
[
self.monitoring_logger.info(data.strip()) for data in data_list
if data
]
@property
def is_telegraf(self):

View File

@ -41,8 +41,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
def configure(self):
# plugin part
self.pom = resource_manager.resource_filename(self.get_option(
"pom", "pom.xml"))
self.pom = resource_manager.resource_filename(
self.get_option("pom", "pom.xml"))
self.testcase = self.get_option("testcase", "")
self.maven_args = self.get_option("mvn_args", '').split()
@ -73,10 +73,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
process_stderr_file = self.core.mkstemp(".log", "maven_")
self.core.add_artifact_file(process_stderr_file)
self.process_stderr = open(process_stderr_file, 'w')
self.process = subprocess.Popen(args,
stderr=self.process_stderr,
stdout=self.process_stderr,
close_fds=True)
self.process = subprocess.Popen(
args,
stderr=self.process_stderr,
stdout=self.process_stderr,
close_fds=True)
def is_test_finished(self):
retcode = self.process.poll()
@ -88,8 +89,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
def end_test(self, retcode):
if self.process and self.process.poll() is None:
logger.warn("Terminating worker process with PID %s",
self.process.pid)
logger.warn(
"Terminating worker process with PID %s", self.process.pid)
self.process.terminate()
if self.process_stderr:
self.process_stderr.close()

View File

@ -1,5 +1,4 @@
class MavenReader(object):
def close(self):
pass
@ -8,7 +7,6 @@ class MavenReader(object):
class MavenStatsReader(object):
def close(self):
pass

View File

@ -16,7 +16,6 @@ import signal
from threading import Thread
from optparse import OptionParser
logger = logging.getLogger(__name__)
@ -28,8 +27,10 @@ def signal_handler(sig, frame):
def set_sig_handler():
uncatchable = ['SIG_DFL', 'SIGSTOP', 'SIGKILL']
for sig_name in [s for s in dir(signal) if (
s.startswith("SIG") and s not in uncatchable)]:
for sig_name in [
s for s in dir(signal)
if (s.startswith("SIG") and s not in uncatchable)
]:
try:
sig_num = getattr(signal, sig_name)
signal.signal(sig_num, signal_handler)
@ -53,7 +54,6 @@ class AbstractMetric:
class CpuLa(AbstractMetric):
def columns(self, ):
return ['System_la1', 'System_la5', 'System_la15']
@ -76,9 +76,11 @@ class CpuStat(AbstractMetric):
self.current_check = {}
def columns(self, ):
columns = ['System_csw', 'System_int', 'CPU_user', 'CPU_nice',
'CPU_system', 'CPU_idle', 'CPU_iowait', 'CPU_irq',
'CPU_softirq', 'System_numproc', 'System_numthreads']
columns = [
'System_csw', 'System_int', 'CPU_user', 'CPU_nice', 'CPU_system',
'CPU_idle', 'CPU_iowait', 'CPU_irq', 'CPU_softirq',
'System_numproc', 'System_numthreads'
]
return columns
def check(self, ):
@ -90,8 +92,9 @@ class CpuStat(AbstractMetric):
proc_stat_all = proc_stat_file.readlines()
proc_stat_file.close()
except Exception as exc:
logger.error('Error opening /proc/stat. Traceback: %s',
traceback.format_exc(exc))
logger.error(
'Error opening /proc/stat. Traceback: %s',
traceback.format_exc(exc))
result.append([''] * 9)
else:
# Parse data
@ -105,8 +108,9 @@ class CpuStat(AbstractMetric):
if stat.startswith('intr '):
self.current_check['intr'] = float(stat.split()[1])
except Exception as exc:
logger.error('Error parsing /proc/stat data. Traceback: %s',
traceback.format_exc(exc))
logger.error(
'Error parsing /proc/stat data. Traceback: %s',
traceback.format_exc(exc))
# Context switches and interrupts delta
try:
@ -116,10 +120,10 @@ class CpuStat(AbstractMetric):
self.prev_check['intr'] = self.current_check['intr']
result.extend([''] * 2)
else:
delta_csw = str(self.current_check['csw'] -
self.prev_check['csw'])
delta_intr = str(self.current_check['intr'] -
self.prev_check['intr'])
delta_csw = str(
self.current_check['csw'] - self.prev_check['csw'])
delta_intr = str(
self.current_check['intr'] - self.prev_check['intr'])
self.prev_check['csw'] = self.current_check['csw']
self.prev_check['intr'] = self.current_check['intr']
result.append(delta_csw)
@ -168,8 +172,9 @@ class CpuStat(AbstractMetric):
else:
pids.append(element)
except Exception as exc:
logger.error('Error trying to count numprocs. Traceback: %s',
traceback.format_exc(exc))
logger.error(
'Error trying to count numprocs. Traceback: %s',
traceback.format_exc(exc))
result.append([''])
else:
result.append(str(len(pids)))
@ -223,17 +228,15 @@ class Custom(AbstractMetric):
for el in self.tail:
cmnd = base64.b64decode(el.split(':')[1])
logger.debug("Run custom check: tail -n 1 %s", cmnd)
output = subprocess.Popen(
['tail', '-n', '1', cmnd],
stdout=subprocess.PIPE).communicate()[0]
output = subprocess.Popen(['tail', '-n', '1', cmnd],
stdout=subprocess.PIPE).communicate()[0]
res.append(self.diff_value(el, output.strip()))
for el in self.call:
cmnd = base64.b64decode(el.split(':')[1])
logger.debug("Run custom check: %s", cmnd)
output = subprocess.Popen(cmnd,
shell=True,
stdout=subprocess.PIPE).stdout.read()
output = subprocess.Popen(
cmnd, shell=True, stdout=subprocess.PIPE).stdout.read()
res.append(self.diff_value(el, output.strip()))
logger.debug("Collected:\n%s", res)
return res
@ -253,7 +256,6 @@ class Custom(AbstractMetric):
class Disk(AbstractMetric):
def __init__(self):
AbstractMetric.__init__(self)
self.read = 0
@ -281,8 +283,10 @@ class Disk(AbstractMetric):
writed += int(data[9])
if self.read or self.write:
result = [str(size * (read - self.read)),
str(size * (writed - self.write))]
result = [
str(size * (read - self.read)),
str(size * (writed - self.write))
]
else:
result = ['', '']
@ -328,8 +332,8 @@ class Disk(AbstractMetric):
devs.append(dsk_name)
break
except Exception as exc:
logger.info("Failed: %s",
traceback.format_exc(exc))
logger.info(
"Failed: %s", traceback.format_exc(exc))
except Exception as exc:
logger.info(
"Failed to get block device name via /sys/devices/: %s",
@ -347,14 +351,16 @@ class Mem(AbstractMetric):
def __init__(self):
AbstractMetric.__init__(self)
self.name = 'advanced memory usage'
self.vars = ('MemUsed', 'Buffers', 'Cached', 'MemFree', 'Dirty',
'MemTotal')
self.vars = (
'MemUsed', 'Buffers', 'Cached', 'MemFree', 'Dirty', 'MemTotal')
# self.open('/proc/meminfo')
def columns(self):
columns = ['Memory_total', 'Memory_used', 'Memory_free',
'Memory_shared', 'Memory_buff', 'Memory_cached']
columns = [
'Memory_total', 'Memory_used', 'Memory_free', 'Memory_shared',
'Memory_buff', 'Memory_cached'
]
logger.info("Start. Columns: %s" % columns)
return columns
@ -373,8 +379,10 @@ class Mem(AbstractMetric):
data.update({name: long(raw_value.split()[0]) / 1024.0})
data['MemUsed'] = data['MemTotal'] - data['MemFree'] - data[
'Buffers'] - data['Cached']
result = [data['MemTotal'], data['MemUsed'], data['MemFree'],
0, data['Buffers'], data['Cached']]
result = [
data['MemTotal'], data['MemUsed'], data['MemFree'], 0,
data['Buffers'], data['Cached']
]
except Exception as e:
logger.error("Can't get meminfo, %s", e, exc_info=True)
result.append([self.empty] * 9)
@ -396,8 +404,7 @@ class NetRetrans(AbstractMetric):
return ['Net_retransmit', ]
def check(self, ):
self.fetch = lambda: int(commands.getoutput(
'netstat -s | grep "segments retransmited" | awk \'{print $1}\''))
self.fetch = lambda: int(commands.getoutput('netstat -s | grep "segments retransmited" | awk \'{print $1}\''))
if self.retr_second is not None:
self.retr_first = self.fetch()
self.delta = []
@ -415,8 +422,16 @@ class NetTcp(AbstractMetric):
def __init__(self):
AbstractMetric.__init__(self)
self.fields = ['Net_closewait', 'Net_estab', 'Net_timewait', ]
self.keys = ['closed', 'estab', 'timewait', ]
self.fields = [
'Net_closewait',
'Net_estab',
'Net_timewait',
]
self.keys = [
'closed',
'estab',
'timewait',
]
def columns(self, ):
return self.fields
@ -427,6 +442,7 @@ class NetTcp(AbstractMetric):
if not, set it to 0.
* make output ordered as "fields" list
"""
def fetch():
return commands.getoutput("ss -s | sed -ne '/^TCP:/p'")
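
A typical line fetched above looks like "TCP: 2857 (estab 17, closed 2804, orphaned 0, synrecv 0, timewait 2755/0), ports 0", though the exact layout varies between iproute2 versions; the sample below is invented to match that shape. A hedged sketch of pulling the counters out of it:

import re

sample = ("TCP:   2857 (estab 17, closed 2804, orphaned 0, "
          "synrecv 0, timewait 2755/0), ports 0")
# pair up every 'word number' token inside the parentheses
counters = dict(
    (key, int(value))
    for key, value in re.findall(r'(\w+) (\d+)', sample.split('(', 1)[1]))
# counters.get('estab', 0) -> 17; absent keys fall back to 0
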
@ -457,7 +473,10 @@ class NetTxRx(AbstractMetric):
self.prev_tx = 0
def columns(self, ):
return ['Net_tx', 'Net_rx', ]
return [
'Net_tx',
'Net_rx',
]
def check(self, ):
"""
@ -477,6 +496,7 @@ class NetTxRx(AbstractMetric):
def position(sample):
return lines[0].split().index(sample)
rx_pos = position('RX-OK')
tx_pos = position('TX-OK')
@ -504,7 +524,6 @@ class NetTxRx(AbstractMetric):
class Net(AbstractMetric):
def __init__(self):
AbstractMetric.__init__(self)
self.recv = 0
@ -553,6 +572,7 @@ class Net(AbstractMetric):
logger.debug("Network recieved/sent bytes: %s", result)
return result
# ===========================
@ -588,13 +608,14 @@ class AgentWorker(Thread):
@staticmethod
def popen(cmnd):
return subprocess.Popen(cmnd,
bufsize=0,
preexec_fn=os.setsid,
close_fds=True,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return subprocess.Popen(
cmnd,
bufsize=0,
preexec_fn=os.setsid,
close_fds=True,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def run(self):
logger.info("Running startup commands")
@ -628,8 +649,8 @@ class AgentWorker(Thread):
while not self.finished:
logger.debug('Start check')
line = []
sync_time = str(self.c_start + (int(time.time()) -
self.c_local_start))
sync_time = str(
self.c_start + (int(time.time()) - self.c_local_start))
line.extend([self.c_host, sync_time])
# known metrics
@ -638,8 +659,8 @@ class AgentWorker(Thread):
continue
try:
data = self.known_metrics[metric_name].check()
if len(data) != len(self.known_metrics[
metric_name].columns()):
if len(data) != len(
self.known_metrics[metric_name].columns()):
raise RuntimeError(
"Data len not matched columns count: %s" % data)
except Exception as e:
@ -659,8 +680,7 @@ class AgentWorker(Thread):
sys.stdout.write(row + '\n')
sys.stdout.flush()
except IOError as e:
logger.error(
"Can't send data to collector, terminating, %s", e)
logger.error("Can't send data to collector, terminating, %s", e)
self.finished = True
self.fixed_sleep(self.c_interval)
@ -690,7 +710,6 @@ class AgentWorker(Thread):
class AgentConfig:
def __init__(self, def_cfg_path):
self.c_interval = 1
self.c_host = socket.getfqdn()
@ -717,12 +736,13 @@ class AgentConfig:
help='Config file path, default is: ./' + def_cfg_path,
default=def_cfg_path)
parser.add_option('-t',
'--timestamp',
dest='timestamp',
type='int',
help='Caller timestamp for synchronization',
default=self.c_local_start)
parser.add_option(
'-t',
'--timestamp',
dest='timestamp',
type='int',
help='Caller timestamp for synchronization',
default=self.c_local_start)
(options, args) = parser.parse_args()
self.c_start = options.timestamp
@ -813,8 +833,9 @@ if __name__ == '__main__':
logger.debug("Join the worker thread, waiting for cleanup")
worker.join(10)
if worker.isAlive():
logger.error("Worker have not finished shutdown in "
"10 seconds, going to exit anyway")
logger.error(
"Worker have not finished shutdown in "
"10 seconds, going to exit anyway")
sys.exit(1)
except KeyboardInterrupt:
if not worker.isAlive():

View File

@ -78,11 +78,12 @@ class AgentClient(object):
def start(self):
"""Start remote agent"""
logger.debug('Start monitoring: %s', self.host)
self.session = self.ssh.async_session(" ".join([
"DEBUG=1", self.python, self.path['AGENT_REMOTE_FOLDER'] +
'/agent.py', '-c', self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg',
'-t', str(int(time.time()))
]))
self.session = self.ssh.async_session(
" ".join([
"DEBUG=1", self.python, self.path['AGENT_REMOTE_FOLDER'] +
'/agent.py', '-c', self.path['AGENT_REMOTE_FOLDER'] +
'/agent.cfg', '-t', str(int(time.time()))
]))
return self.session
def read_maybe(self):
@ -103,8 +104,8 @@ class AgentClient(object):
try:
float(self.interval)
except:
raise ValueError("Monitoring interval should be a number: '%s'" %
self.interval)
raise ValueError(
"Monitoring interval should be a number: '%s'" % self.interval)
cfg = ConfigParser.ConfigParser()
cfg.add_section('main')
@ -136,8 +137,8 @@ class AgentClient(object):
def install(self, loglevel):
"""Create folder and copy agent and metrics scripts to remote host"""
logger.info("Installing monitoring agent at %s@%s...", self.username,
self.host)
logger.info(
"Installing monitoring agent at %s@%s...", self.username, self.host)
# create remote temp dir
cmd = self.python + ' -c "import tempfile; print tempfile.mkdtemp();"'
@ -145,37 +146,39 @@ class AgentClient(object):
try:
out, errors, err_code = self.ssh.execute(cmd)
except:
logger.error("Failed to install monitoring agent to %s",
self.host,
exc_info=True)
logger.error(
"Failed to install monitoring agent to %s",
self.host,
exc_info=True)
return None
if errors:
logging.error("[%s] error: '%s'", self.host, errors)
return None
if err_code:
logging.error("Failed to create remote dir via SSH"
" at %s@%s, code %s: %s" % (self.username, self.host,
err_code, out.strip()))
logging.error(
"Failed to create remote dir via SSH"
" at %s@%s, code %s: %s" %
(self.username, self.host, err_code, out.strip()))
return None
remote_dir = out.strip()
if remote_dir:
self.path['AGENT_REMOTE_FOLDER'] = remote_dir
logger.debug("Remote dir at %s:%s", self.host,
self.path['AGENT_REMOTE_FOLDER'])
logger.debug(
"Remote dir at %s:%s", self.host, self.path['AGENT_REMOTE_FOLDER'])
# Copy agent and config
agent_config = self.create_agent_config(loglevel)
try:
self.ssh.send_file(self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
self.path['AGENT_REMOTE_FOLDER'] + '/agent.py')
self.ssh.send_file(agent_config,
self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg')
self.ssh.send_file(
self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
self.path['AGENT_REMOTE_FOLDER'] + '/agent.py')
self.ssh.send_file(
agent_config, self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg')
except:
logger.error("Failed to install agent on %s",
self.host,
exc_info=True)
logger.error(
"Failed to install agent on %s", self.host, exc_info=True)
return None
return agent_config
@ -186,12 +189,12 @@ class AgentClient(object):
if self.session:
self.session.send("stop\n")
self.session.close()
fhandle, log_filename = tempfile.mkstemp('.log',
"agent_" + self.host + "_")
fhandle, log_filename = tempfile.mkstemp(
'.log', "agent_" + self.host + "_")
os.close(fhandle)
try:
self.ssh.get_file(self.path['AGENT_REMOTE_FOLDER'] + "_agent.log",
log_filename)
self.ssh.get_file(
self.path['AGENT_REMOTE_FOLDER'] + "_agent.log", log_filename)
self.ssh.rm_r(self.path['AGENT_REMOTE_FOLDER'])
except:
logger.error("Exception while uninstalling agent", exc_info=True)
@ -265,9 +268,7 @@ class MonitoringCollector(object):
logger.debug("Got data from agent: %s", data.strip())
self.send_data.append(
self.filter_unused_data(
self.filter_conf, self.filter_mask, data
)
)
self.filter_conf, self.filter_mask, data))
logger.debug("Data after filtering: %s", self.send_data)
if not self.first_data_received and self.send_data:
@ -286,8 +287,10 @@ class MonitoringCollector(object):
def send_collected_data(self):
"""sends pending data set to listeners"""
[listener.monitoring_data(self.send_data)
for listener in self.listeners]
[
listener.monitoring_data(self.send_data)
for listener in self.listeners
]
self.send_data = []
def get_host_config(self, host, target_hint):
@ -307,14 +310,18 @@ class MonitoringCollector(object):
hostname = host.get('address').lower()
if hostname == '[target]':
if not target_hint:
raise ValueError("Can't use [target] keyword with "
"no target parameter specified")
raise ValueError(
"Can't use [target] keyword with "
"no target parameter specified")
logger.debug("Using target hint: %s", target_hint)
hostname = target_hint.lower()
stats = []
startups = []
shutdowns = []
custom = {'tail': [], 'call': [], }
custom = {
'tail': [],
'call': [],
}
metrics_count = 0
for metric in host:
# known metrics
@ -337,8 +344,9 @@ class MonitoringCollector(object):
isdiff = metric.get('diff')
if not isdiff:
isdiff = 0
stat = "%s:%s:%s" % (base64.b64encode(metric.get('label')),
base64.b64encode(metric.text), isdiff)
stat = "%s:%s:%s" % (
base64.b64encode(metric.get('label')),
base64.b64encode(metric.text), isdiff)
stats.append('Custom:' + stat)
custom[metric.get('measure', 'call')].append(stat)
elif (str(metric.tag)).lower() == 'startup':
@ -379,7 +387,9 @@ class MonitoringCollector(object):
'shutdowns': shutdowns,
# XXX: should be separate?
'stats': {hostname: stats},
'stats': {
hostname: stats
},
}
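
The 'Custom:' entries gathered into 'stats' above are built a few lines up by base64-encoding the metric label and its shell command, so the colon-separated wire format stays unambiguous; MonitoringDataDecoder below reverses this with standard_b64decode. An example with an invented label and command:

import base64

label, cmd, isdiff = 'queue_len', 'wc -l < /var/spool/queue', 0
stat = "%s:%s:%s" % (base64.b64encode(label), base64.b64encode(cmd), isdiff)
# -> 'cXVldWVfbGVu:d2MgLWwgPCAvdmFyL3Nwb29sL3F1ZXVl:0'
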
def getconfig(self, filename, target_hint):
@ -414,8 +424,9 @@ class MonitoringCollector(object):
try:
res.append(filter_list[key])
except IndexError:
logger.warn("Problems filtering data: %s with %s", mask,
len(filter_list))
logger.warn(
"Problems filtering data: %s with %s", mask,
len(filter_list))
return None
return ';'.join(res)
@ -527,8 +538,7 @@ class MonitoringDataDecoder(object):
self.metrics[host] = []
for metric in data:
if metric.startswith("Custom:"):
metric = base64.standard_b64decode(metric.split(':')[
1])
metric = base64.standard_b64decode(metric.split(':')[1])
self.metrics[host].append(metric)
data_dict[metric] = self.NA
is_initial = True
@ -537,12 +547,13 @@ class MonitoringDataDecoder(object):
timestamp = data.pop(0)
if host not in self.metrics.keys():
raise ValueError("Host %s not in started metrics: %s" %
(host, self.metrics))
raise ValueError(
"Host %s not in started metrics: %s" % (host, self.metrics))
if len(self.metrics[host]) != len(data):
raise ValueError("Metrics len and data len differs: %s vs %s" %
(len(self.metrics[host]), len(data)))
raise ValueError(
"Metrics len and data len differs: %s vs %s" %
(len(self.metrics[host]), len(data)))
for metric in self.metrics[host]:
data_dict[metric] = data.pop(0)
@ -550,4 +561,5 @@ class MonitoringDataDecoder(object):
logger.debug("Decoded data %s: %s", host, data_dict)
return host, data_dict, is_initial, timestamp
# FIXME: 3 synchronize times between agent and collector better

View File

@ -43,8 +43,8 @@ class Plugin(AbstractPlugin):
def start_test(self):
if self.monitoring:
self.monitoring.load_start_time = time.time()
logger.debug("load_start_time = %s" %
self.monitoring.load_start_time)
logger.debug(
"load_start_time = %s" % self.monitoring.load_start_time)
def get_available_options(self):
return ["config", "default_target", 'ssh_timeout']
@ -67,8 +67,8 @@ class Plugin(AbstractPlugin):
self.config = xmlfile
if not os.path.exists(self.config):
raise OSError("Monitoring config file not found: %s" %
self.config)
raise OSError(
"Monitoring config file not found: %s" % self.config)
if self.config == 'none':
self.monitoring = None
@ -99,8 +99,8 @@ class Plugin(AbstractPlugin):
info = phantom.get_info()
if info:
self.default_target = info.address
logger.debug("Changed monitoring target to %s",
self.default_target)
logger.debug(
"Changed monitoring target to %s", self.default_target)
except KeyError as ex:
logger.debug("Phantom plugin not found: %s", ex)
@ -192,8 +192,8 @@ class SaveMonToFile(MonitoringDataListener):
self.store.close()
class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener,
MonitoringDataDecoder):
class MonitoringWidget(
AbstractInfoWidget, MonitoringDataListener, MonitoringDataDecoder):
"""
Screen widget
"""
@ -252,8 +252,8 @@ class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener,
res = "Monitoring is " + screen.markup.GREEN + \
"online" + screen.markup.RESET + ":\n"
for hostname, metrics in self.data.items():
tm_stamp = datetime.datetime.fromtimestamp(float(self.time[
hostname])).strftime('%H:%M:%S')
tm_stamp = datetime.datetime.fromtimestamp(
float(self.time[hostname])).strftime('%H:%M:%S')
res += (
" " + screen.markup.CYAN + "%s" + screen.markup.RESET +
" at %s:\n") % (hostname, tm_stamp)
@ -269,8 +269,8 @@ class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener,
return res.strip()
class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener,
MonitoringDataDecoder):
class AbstractMetricCriterion(
AbstractCriterion, MonitoringDataListener, MonitoringDataDecoder):
""" Parent class for metric criterion """
def __init__(self, autostop, param_str):
@ -290,8 +290,7 @@ class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener,
self.host = param_str.split(',')[0].strip()
self.metric = param_str.split(',')[1].strip()
self.value_limit = float(param_str.split(',')[2])
self.seconds_limit = expand_to_seconds(param_str.split(',')[
3])
self.seconds_limit = expand_to_seconds(param_str.split(',')[3])
self.last_second = None
self.seconds_count = 0
@ -311,9 +310,10 @@ class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener,
self.metric] == self.NA:
data[self.metric] = 0
logger.debug("Compare %s %s/%s=%s to %s", self.get_type_string(),
host, self.metric, data[self.metric],
self.value_limit)
logger.debug(
"Compare %s %s/%s=%s to %s",
self.get_type_string(), host, self.metric, data[self.metric],
self.value_limit)
if self.comparison_fn(float(data[self.metric]), self.value_limit):
if not self.seconds_count:
self.cause_second = self.last_second
@ -358,8 +358,9 @@ class MetricHigherCriterion(AbstractMetricCriterion):
return "%s/%s metric value is higher than %s for %s seconds" % items
def widget_explain(self):
items = (self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
items = (
self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
return "%s/%s > %s for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit
@ -385,8 +386,9 @@ class MetricLowerCriterion(AbstractMetricCriterion):
return "%s/%s metric value is lower than %s for %s seconds" % items
def widget_explain(self):
items = (self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
items = (
self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
return "%s/%s < %s for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit

View File

@ -10,7 +10,6 @@ logger = logging.getLogger(__name__) # pylint: disable=C0103
class OverloadClient(object):
def __init__(self):
self.address = None
self.token = None
@ -77,8 +76,9 @@ class OverloadClient(object):
def get_task_data(self, task):
return self.get("api/task/" + task + "/summary.json")
def new_job(self, task, person, tank, target_host, target_port, loadscheme,
detailed_time, notify_list):
def new_job(
self, task, person, tank, target_host, target_port, loadscheme,
detailed_time, notify_list):
data = {
'task': task,
'person': person,
@ -110,20 +110,24 @@ class OverloadClient(object):
def get_job_summary(self, jobno):
result = self.get(
'api/job/' + str(jobno) +
"/summary.json?api_token=" + self.api_token)
'api/job/' + str(jobno) + "/summary.json?api_token=" +
self.api_token)
return result[0]
def close_job(self, jobno, retcode):
params = {'exitcode': str(retcode), 'api_token': self.api_token, }
params = {
'exitcode': str(retcode),
'api_token': self.api_token,
}
result = self.get('api/job/' + str(jobno) + '/close.json?' +
urllib.urlencode(params))
result = self.get(
'api/job/' + str(jobno) + '/close.json?' + urllib.urlencode(params))
return result[0]['success']
def edit_job_metainfo(self, jobno, job_name, job_dsc, instances, ammo_path,
loop_count, version_tested, is_regression, component,
tank_type, cmdline, is_starred):
def edit_job_metainfo(
self, jobno, job_name, job_dsc, instances, ammo_path, loop_count,
version_tested, is_regression, component, tank_type, cmdline,
is_starred):
data = {
'name': job_name,
'description': job_dsc,
@ -151,11 +155,8 @@ class OverloadClient(object):
data['description'] = comment.strip()
response = self.post(
'api/job/' +
str(jobno) +
"/set_imbalance.json?api_token=" +
self.api_token,
data)
'api/job/' + str(jobno) + "/set_imbalance.json?api_token=" +
self.api_token, data)
return response
def second_data_to_push_item(self, data, stat, timestamp, overall, case):
@ -191,20 +192,22 @@ class OverloadClient(object):
}
}
for q, value in zip(data["interval_real"]["q"]["q"],
data["interval_real"]["q"]["value"]):
for q, value in zip(
data["interval_real"]["q"]["q"],
data["interval_real"]["q"]["value"]):
api_data['trail']['q' + str(q)] = value / 1000.0
for code, cnt in data["net_code"]["count"].iteritems():
api_data['net_codes'].append({'code': int(code),
'count': int(cnt)})
api_data['net_codes'].append({'code': int(code), 'count': int(cnt)})
for code, cnt in data["proto_code"]["count"].iteritems():
api_data['http_codes'].append({'code': int(code),
'count': int(cnt)})
api_data['http_codes'].append({
'code': int(code),
'count': int(cnt)
})
api_data['time_intervals'] = self.convert_hist(data["interval_real"][
"hist"])
api_data['time_intervals'] = self.convert_hist(
data["interval_real"]["hist"])
return api_data
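
Only part of the payload is assembled in this method, but the keys visible above have a fixed shape; an invented one-second fragment for orientation:

api_data_fragment = {
    'trail': {'q50': 12.5, 'q95': 88.0, 'q100': 301.2},   # quantiles, ms
    'net_codes': [{'code': 0, 'count': 99}, {'code': 110, 'count': 1}],
    'http_codes': [{'code': 200, 'count': 97}, {'code': 503, 'count': 3}],
    # 'time_intervals' is filled from convert_hist() below
}
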
def convert_hist(self, hist):
@ -228,11 +231,11 @@ class OverloadClient(object):
case_name = "__EMPTY__"
if (len(case_name)) > 128:
raise RuntimeError('tag (case) name is too long: ' + case_name)
push_item = self.second_data_to_push_item(case_data, stat_item, ts,
0, case_name)
push_item = self.second_data_to_push_item(
case_data, stat_item, ts, 0, case_name)
items.append(push_item)
overall = self.second_data_to_push_item(data_item["overall"],
stat_item, ts, 1, '')
overall = self.second_data_to_push_item(
data_item["overall"], stat_item, ts, 1, '')
items.append(overall)
while True:
@ -252,8 +255,9 @@ class OverloadClient(object):
"Retry in 10 sec: %s", ex)
time.sleep(10) # FIXME this makes all plugins freeze
except requests.exceptions.RequestException as ex:
logger.warn("Failed to push second data to API,"
" retry in 10 sec: %s", ex)
logger.warn(
"Failed to push second data to API,"
" retry in 10 sec: %s", ex)
time.sleep(10) # FIXME this makes all plugins freeze
except Exception: # pylint: disable=W0703
# something nasty happened, but we don't want to fail here
@ -288,8 +292,9 @@ class OverloadClient(object):
' retry in 10s: %s', ex)
time.sleep(10) # FIXME this makes all plugins freeze
except requests.exceptions.RequestException as ex:
logger.warning('Problems sending monitoring data,'
' retry in 10s: %s', ex)
logger.warning(
'Problems sending monitoring data,'
' retry in 10s: %s', ex)
time.sleep(10) # FIXME this makes all plugins freeze
except Exception: # pylint: disable=W0703
# something irrecoverable happened
@ -298,13 +303,12 @@ class OverloadClient(object):
return
def send_console(self, jobno, console):
logger.debug("Sending console view [%s]: %s", len(console),
console[:64])
logger.debug(
"Sending console view [%s]: %s", len(console), console[:64])
addr = ("api/job/%s/console.txt?api_token=" % jobno) + self.api_token,
self.post_raw(addr, {"console": console, })
def send_config_snapshot(self, jobno, config):
logger.debug("Sending config snapshot")
addr = ("api/job/%s/configinfo.txt?api_token=" %
jobno) + self.api_token
addr = ("api/job/%s/configinfo.txt?api_token=" % jobno) + self.api_token
self.post_raw(addr, {"configinfo": config, })

@ -58,17 +58,21 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
return __file__
def get_available_options(self):
opts = ["api_address",
"task",
"job_name",
"job_dsc",
"notify",
"ver", ]
opts += ["component",
"regress",
"operator",
"copy_config_to",
"jobno_file", ]
opts = [
"api_address",
"task",
"job_name",
"job_dsc",
"notify",
"ver",
]
opts += [
"component",
"regress",
"operator",
"copy_config_to",
"jobno_file",
]
opts += ["token_file"]
return opts
@ -79,20 +83,22 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
try:
with open(filename, 'r') as handle:
data = handle.read().strip()
logger.info("Read authentication token from %s, "
"token length is %d bytes", filename,
len(str(data)))
logger.info(
"Read authentication token from %s, "
"token length is %d bytes", filename, len(str(data)))
except IOError:
logger.error("Failed to read Overload API token from %s",
filename)
logger.error(
"Failed to read Overload API token from %s", filename)
logger.info(
"Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter")
"Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter"
)
raise RuntimeError("API token error")
return data
else:
logger.error("Overload API token filename is not defined")
logger.info(
"Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter")
"Get your Overload API token from https://overload.yandex.net and provide it via 'overload.token_file' parameter"
)
raise RuntimeError("API token error")
def configure(self):
@ -102,17 +108,13 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
self.api_client.set_api_address(self.get_option("api_address"))
self.api_client.set_api_timeout(self.get_option("api_timeout", 30))
self.api_client.set_api_token(
self.read_token(
self.get_option(
"token_file", "")))
self.read_token(self.get_option("token_file", "")))
self.task = self.get_option("task", "DEFAULT")
self.job_name = unicode(
self.get_option(
"job_name",
"none").decode("utf8"))
self.get_option("job_name", "none").decode("utf8"))
if self.job_name == "ask" and sys.stdin.isatty():
self.job_name = unicode(raw_input(
"Please, enter job_name: ").decode("utf8"))
self.job_name = unicode(
raw_input("Please, enter job_name: ").decode("utf8"))
self.job_dsc = unicode(self.get_option("job_dsc", "").decode("utf8"))
if self.job_dsc == "ask" and sys.stdin.isatty():
self.job_dsc = unicode(
@ -204,8 +206,9 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
logger.info("Detected target: %s", self.target)
self.jobno = self.api_client.new_job(
self.task, self.operator, socket.getfqdn(), self.target, port,
loadscheme, detailed_field, self.notify_list)
self.task, self.operator,
socket.getfqdn(), self.target, port, loadscheme, detailed_field,
self.notify_list)
web_link = "%s%s" % (self.api_client.address, self.jobno)
logger.info("Web link: %s", web_link)
self.publish("jobno", self.jobno)
@ -250,11 +253,11 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
if autostop and autostop.cause_criterion:
rps = 0
if autostop.cause_criterion.cause_second:
rps = autostop.cause_criterion.cause_second[
1]["metrics"]["reqps"]
rps = autostop.cause_criterion.cause_second[1]["metrics"][
"reqps"]
if not rps:
rps = autostop.cause_criterion.cause_second[
0]["overall"]["interval_real"]["len"]
rps = autostop.cause_criterion.cause_second[0][
"overall"]["interval_real"]["len"]
self.api_client.set_imbalance_and_dsc(
self.jobno, rps, autostop.cause_criterion.explain())
@ -289,8 +292,10 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
self.api_client.push_monitoring_data(
self.jobno, json.dumps(data_list))
elif "Monitoring" in self.core.job.monitoring_plugin.__module__:
[self.api_client.push_monitoring_data(
self.jobno, data) for data in data_list if data]
[
self.api_client.push_monitoring_data(self.jobno, data)
for data in data_list if data
]
else:
logger.warn("The test was stopped from Web interface")
@ -305,8 +310,9 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
config_filename = mon.config
if config_filename and config_filename not in ['none', 'auto']:
with open(config_filename) as config_file:
config.set(MonitoringPlugin.SECTION, "config_contents",
config_file.read())
config.set(
MonitoringPlugin.SECTION, "config_contents",
config_file.read())
except Exception: # pylint: disable=W0703
logger.debug("Can't get monitoring config", exc_info=True)
@ -314,8 +320,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
config.write(output)
if self.jobno:
try:
self.api_client.send_config_snapshot(self.jobno,
output.getvalue())
self.api_client.send_config_snapshot(
self.jobno, output.getvalue())
except Exception: # pylint: disable=W0703
logger.debug("Can't send config snapshot: %s", exc_info=True)
@ -329,11 +335,7 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
PLUGIN_DIR = os.path.join(self.core.artifacts_base_dir, self.SECTION)
if not os.path.exists(PLUGIN_DIR):
os.makedirs(PLUGIN_DIR)
os.symlink(
self.core.artifacts_dir,
os.path.join(
PLUGIN_DIR,
str(name)))
os.symlink(self.core.artifacts_dir, os.path.join(PLUGIN_DIR, str(name)))
def _core_with_tank_api(self):
"""
@ -347,13 +349,12 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
else:
api_found = isinstance(self.core, yandex_tank_api.worker.TankCore)
logger.debug(
"We are%s running under API server",
"" if api_found else " likely not")
"We are%s running under API server", ""
if api_found else " likely not")
return api_found
class JobInfoWidget(AbstractInfoWidget):
def __init__(self, sender):
AbstractInfoWidget.__init__(self)
self.owner = sender
@ -365,8 +366,9 @@ class JobInfoWidget(AbstractInfoWidget):
template = "Author: " + screen.markup.RED + "%s" + \
screen.markup.RESET + \
"%s\n Job: %s %s\n Web: %s%s"
data = (self.owner.operator[:1], self.owner.operator[1:],
self.owner.jobno, self.owner.job_name,
self.owner.api_client.address, self.owner.jobno)
data = (
self.owner.operator[:1], self.owner.operator[1:], self.owner.jobno,
self.owner.job_name, self.owner.api_client.address,
self.owner.jobno)
return template % data

@ -28,7 +28,10 @@ def linear_schedule(start_rps, end_rps, period):
def unlimited_schedule(*args):
return {"LimiterType": "unlimited", "Parameters": {}, }
return {
"LimiterType": "unlimited",
"Parameters": {},
}
step_producers = {
@ -40,8 +43,8 @@ step_producers = {
def parse_schedule(schedule):
steps = [
step.strip()
for step in " ".join(schedule.split("\n")).split(')') if step.strip()
step.strip() for step in " ".join(schedule.split("\n")).split(')')
if step.strip()
]
if len(steps) > 1:
raise NotImplementedError("Composite schedules not implemented yet")
@ -50,12 +53,11 @@ def parse_schedule(schedule):
if schedule_type in step_producers:
return step_producers[schedule_type](*params)
else:
raise NotImplementedError("Step of type %s is not implemented" %
schedule_type)
raise NotImplementedError(
"Step of type %s is not implemented" % schedule_type)
class PandoraConfig(object):
def __init__(self):
self.pools = []
@ -70,10 +72,9 @@ class PandoraConfig(object):
class PoolConfig(object):
def __init__(self):
self.config = json.loads(resource_string(
__name__, 'config/pandora_pool_default.json'))
self.config = json.loads(
resource_string(__name__, 'config/pandora_pool_default.json'))
def set_ammo(self, ammo):
self.config["AmmoProvider"]["AmmoSource"] = ammo

@ -34,16 +34,17 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
return __file__
def get_available_options(self):
opts = ["pandora_cmd", "buffered_seconds", "ammo", "loop",
"sample_log", "config_file", "startup_schedule",
"user_schedule", "gun_type"]
opts = [
"pandora_cmd", "buffered_seconds", "ammo", "loop", "sample_log",
"config_file", "startup_schedule", "user_schedule", "gun_type"
]
return opts
def configure(self):
# plugin part
self.pandora_cmd = self.get_option("pandora_cmd", "pandora")
self.buffered_seconds = int(self.get_option("buffered_seconds",
self.buffered_seconds))
self.buffered_seconds = int(
self.get_option("buffered_seconds", self.buffered_seconds))
pool_config = PoolConfig()
@ -94,8 +95,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
self.pandora_config_file = self.get_option("config_file", "")
if not self.pandora_config_file:
self.pandora_config_file = self.core.mkstemp(".json",
"pandora_config_")
self.pandora_config_file = self.core.mkstemp(
".json", "pandora_config_")
self.core.add_artifact_file(self.pandora_config_file)
with open(self.pandora_config_file, 'w') as config_file:
config_file.write(self.pandora_config.json())
@ -133,10 +134,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
process_stderr_file = self.core.mkstemp(".log", "pandora_")
self.core.add_artifact_file(process_stderr_file)
self.process_stderr = open(process_stderr_file, 'w')
self.process = subprocess.Popen(args,
stderr=self.process_stderr,
stdout=self.process_stderr,
close_fds=True)
self.process = subprocess.Popen(
args,
stderr=self.process_stderr,
stdout=self.process_stderr,
close_fds=True)
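# Editorial note: pandora's stdout and stderr are merged into the single
# artifact file registered above, and close_fds keeps the tank's own
# descriptors from leaking into the child process.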
def is_test_finished(self):
retcode = self.process.poll()
@ -148,8 +150,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
def end_test(self, retcode):
if self.process and self.process.poll() is None:
logger.warn("Terminating worker process with PID %s",
self.process.pid)
logger.warn(
"Terminating worker process with PID %s", self.process.pid)
self.process.terminate()
if self.process_stderr:
self.process_stderr.close()

@ -13,25 +13,31 @@ class PandoraStatsReader(object):
pandora_response = requests.get("http://localhost:1234/debug/vars")
pandora_stat = pandora_response.json()
return [{'ts': int(time.time() - 1),
'metrics': {
'instances':
pandora_stat.get("engine_ActiveRequests"),
'reqps': pandora_stat.get("engine_ReqPS"),
}}]
return [{
'ts': int(time.time() - 1),
'metrics': {
'instances': pandora_stat.get("engine_ActiveRequests"),
'reqps': pandora_stat.get("engine_ReqPS"),
}
}]
except requests.ConnectionError:
logger.info("Pandora expvar http interface is unavailable")
except requests.HTTPError:
logger.warning("Pandora expvar http interface is unavailable",
exc_info=True)
logger.warning(
"Pandora expvar http interface is unavailable", exc_info=True)
except Exception:
logger.warning("Couldn't decode pandora stat:\n%s\n",
pandora_response.text,
exc_info=True)
logger.warning(
"Couldn't decode pandora stat:\n%s\n",
pandora_response.text,
exc_info=True)
return [{'ts': int(time.time() - 1),
'metrics': {'instances': 0,
'reqps': 0}}]
return [{
'ts': int(time.time() - 1),
'metrics': {
'instances': 0,
'reqps': 0
}
}]
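# Editorial note: whenever the expvar poll fails above, a zeroed sample
# is still returned so the aggregator sees an unbroken per-second
# timeline.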
def close(self):
pass

@ -57,8 +57,9 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
return __file__
def get_available_options(self):
opts = ["phantom_path", "buffered_seconds", "exclude_markers",
"affinity"]
opts = [
"phantom_path", "buffered_seconds", "exclude_markers", "affinity"
]
opts += [PhantomConfig.OPTION_PHOUT, self.OPTION_CONFIG]
opts += PhantomConfig.get_available_options()
return opts
@ -68,11 +69,11 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
self.config = self.get_option(self.OPTION_CONFIG, '')
self.phantom_path = self.get_option("phantom_path", 'phantom')
self.enum_ammo = self.get_option("enum_ammo", False)
self.buffered_seconds = int(self.get_option("buffered_seconds",
self.buffered_seconds))
self.exclude_markers = set(filter(
(lambda marker: marker != ''), self.get_option('exclude_markers',
[]).split(' ')))
self.buffered_seconds = int(
self.get_option("buffered_seconds", self.buffered_seconds))
self.exclude_markers = set(
filter((lambda marker: marker != ''),
self.get_option('exclude_markers', []).split(' ')))
self.taskset_affinity = self.get_option('affinity', '')
try:
@ -83,8 +84,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
"No autostop plugin found, not adding instances criterion")
self.predefined_phout = self.get_option(PhantomConfig.OPTION_PHOUT, '')
if not self.get_option(self.OPTION_CONFIG,
'') and self.predefined_phout:
if not self.get_option(
self.OPTION_CONFIG, '') and self.predefined_phout:
self.phout_import_mode = True
if not self.config and not self.phout_import_mode:
@ -111,23 +112,26 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
"Config check failed. Subprocess returned code %s" %
retcode)
if result[2]:
raise RuntimeError("Subprocess returned message: %s" %
result[2])
raise RuntimeError(
"Subprocess returned message: %s" % result[2])
reader = PhantomReader(self.phantom.phout_file)
logger.debug("Linking sample reader to aggregator."
" Reading samples from %s", self.phantom.phout_file)
logger.debug(
"Linking sample reader to aggregator."
" Reading samples from %s", self.phantom.phout_file)
logger.debug("Linking stats reader to aggregator."
" Reading stats from %s", self.phantom.stat_log)
logger.debug(
"Linking stats reader to aggregator."
" Reading stats from %s", self.phantom.stat_log)
else:
reader = PhantomReader(self.predefined_phout)
logger.debug("Linking sample reader to aggregator."
" Reading samples from %s", self.predefined_phout)
logger.debug(
"Linking sample reader to aggregator."
" Reading samples from %s", self.predefined_phout)
if aggregator:
aggregator.reader = reader
info = self.phantom.get_info()
aggregator.stats_reader = PhantomStatsReader(self.phantom.stat_log,
info)
aggregator.stats_reader = PhantomStatsReader(
self.phantom.stat_log, info)
aggregator.add_result_listener(self)
try:
@ -151,27 +155,30 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
def start_test(self):
if not self.phout_import_mode:
args = [self.phantom_path, 'run', self.config]
logger.debug("Starting %s with arguments: %s", self.phantom_path,
args)
logger.debug(
"Starting %s with arguments: %s", self.phantom_path, args)
if self.taskset_affinity != '':
args = [self.core.taskset_path, '-c', self.taskset_affinity
] + args
logger.debug("Enabling taskset for phantom with affinity: %s,"
" cores count: %d", self.taskset_affinity,
self.cpu_count)
args = [
self.core.taskset_path, '-c', self.taskset_affinity
] + args
logger.debug(
"Enabling taskset for phantom with affinity: %s,"
" cores count: %d", self.taskset_affinity, self.cpu_count)
self.phantom_start_time = time.time()
phantom_stderr_file = self.core.mkstemp(".log",
"phantom_stdout_stderr_")
phantom_stderr_file = self.core.mkstemp(
".log", "phantom_stdout_stderr_")
self.core.add_artifact_file(phantom_stderr_file)
self.phantom_stderr = open(phantom_stderr_file, 'w')
self.process = subprocess.Popen(args,
stderr=self.phantom_stderr,
stdout=self.phantom_stderr,
close_fds=True)
self.process = subprocess.Popen(
args,
stderr=self.phantom_stderr,
stdout=self.phantom_stderr,
close_fds=True)
else:
if not os.path.exists(self.predefined_phout):
raise RuntimeError("Phout file not exists for import: %s" %
self.predefined_phout)
raise RuntimeError(
"Phout file not exists for import: %s" %
self.predefined_phout)
logger.warn(
"Will import phout file instead of running phantom: %s",
self.predefined_phout)
@ -180,14 +187,13 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
if not self.phout_import_mode:
retcode = self.process.poll()
if retcode is not None:
logger.info("Phantom done its work with exit code: %s",
retcode)
logger.info("Phantom done its work with exit code: %s", retcode)
return abs(retcode)
else:
info = self.get_info()
if info:
eta = int(info.duration) - (int(time.time()) -
int(self.phantom_start_time))
eta = int(info.duration) - (
int(time.time()) - int(self.phantom_start_time))
self.publish('eta', eta)
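# Editorial note: eta is the planned duration minus the wall-clock time
# elapsed since phantom started, so it can go negative on overrun.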
return -1
else:
@ -199,8 +205,8 @@ class Plugin(AbstractPlugin, GeneratorPlugin):
def end_test(self, retcode):
if self.process and self.process.poll() is None:
logger.warn("Terminating phantom process with PID %s",
self.process.pid)
logger.warn(
"Terminating phantom process with PID %s", self.process.pid)
self.process.terminate()
if self.process:
self.process.communicate()
@ -255,8 +261,7 @@ class UsedInstancesCriterion(AbstractCriterion):
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(param_str.split(',')[
1])
self.seconds_limit = expand_to_seconds(param_str.split(',')[1])
try:
phantom = autostop.core.get_plugin_of_type(Plugin)
@ -264,8 +269,9 @@ class UsedInstancesCriterion(AbstractCriterion):
if info:
self.threads_limit = info.instances
if not self.threads_limit:
raise ValueError("Cannot create 'instances' criterion"
" with zero instances limit")
raise ValueError(
"Cannot create 'instances' criterion"
" with zero instances limit")
except KeyError:
logger.warning("No phantom module, 'instances' autostop disabled")
@ -302,10 +308,12 @@ class UsedInstancesCriterion(AbstractCriterion):
return level_str
def explain(self):
items = (self.get_level_str(), self.seconds_count,
self.cause_second[0].get('ts'))
return ("Testing threads (instances) utilization"
" higher than %s for %ss, since %s" % items)
items = (
self.get_level_str(), self.seconds_count,
self.cause_second[0].get('ts'))
return (
"Testing threads (instances) utilization"
" higher than %s for %ss, since %s" % items)
def widget_explain(self):
items = (self.get_level_str(), self.seconds_count, self.seconds_limit)

@ -10,7 +10,6 @@ import datetime
import itertools as itt
from StringIO import StringIO
logger = logging.getLogger(__name__)
phout_columns = [
@ -38,10 +37,7 @@ dtypes = {
def string_to_df(data):
start_time = time.time()
chunk = pd.read_csv(
StringIO(data),
sep='\t',
names=phout_columns,
dtype=dtypes)
StringIO(data), sep='\t', names=phout_columns, dtype=dtypes)
chunk['receive_ts'] = chunk.send_ts + chunk.interval_real / 1e6
chunk['receive_sec'] = chunk.receive_ts.astype(np.int64)
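# Editorial note: send_ts is in seconds while interval_real is in
# microseconds, so receive_ts reconstructs the response arrival time;
# truncating it to int64 gives the per-second bucket indexed below.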
@ -49,13 +45,11 @@ def string_to_df(data):
chunk['tag'] = chunk.tag.str.rsplit('#', 1, expand=True)[0]
chunk.set_index(['receive_sec'], inplace=True)
logger.debug("Chunk decode time: %.2fms",
(time.time() - start_time) * 1000)
logger.debug("Chunk decode time: %.2fms", (time.time() - start_time) * 1000)
return chunk
class PhantomReader(object):
def __init__(self, filename, cache_size=1024 * 1024 * 50):
self.buffer = ""
self.phout = open(filename, 'r')
@ -87,7 +81,6 @@ class PhantomReader(object):
class PhantomStatsReader(object):
def __init__(self, filename, phantom_info):
self.phantom_info = phantom_info
self.buffer = ""
@ -116,9 +109,13 @@ class PhantomStatsReader(object):
reqps = 0
if offset >= 0 and offset < len(self.phantom_info.steps):
reqps = self.phantom_info.steps[offset][0]
yield {'ts': chunk_date - 1,
'metrics': {'instances': instances,
'reqps': reqps}}
yield {
'ts': chunk_date - 1,
'metrics': {
'instances': instances,
'reqps': reqps
}
}
def _read_stat_data(self, stat_file):
chunk = stat_file.read(1024 * 1024 * 50)
@ -128,10 +125,12 @@ class PhantomStatsReader(object):
if len(parts) > 1:
ready_chunk = parts[0]
self.stat_buffer = parts[1]
chunks = [json.loads('{%s}}' % s)
for s in ready_chunk.split('\n},')]
return list(itt.chain(*(self._decode_stat_data(chunk)
for chunk in chunks)))
chunks = [
json.loads('{%s}}' % s) for s in ready_chunk.split('\n},')
]
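# Editorial note: splitting the stat stream on '\n},' strips each
# record's closing braces, and '{%s}}' puts them back so every chunk
# parses as standalone JSON.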
return list(
itt.chain(
*(self._decode_stat_data(chunk) for chunk in chunks)))
else:
self.stat_buffer += stat_file.readline()

@ -4,16 +4,14 @@ from yandextank.plugins.Phantom.reader import PhantomReader
class TestPhantomReader(object):
def test_read_all(self):
reader = PhantomReader(
'yandextank/plugins/Phantom/tests/phout.dat',
cache_size=1024)
'yandextank/plugins/Phantom/tests/phout.dat', cache_size=1024)
df = pd.DataFrame()
for chunk in reader:
if chunk is None:
reader.close()
else:
df = df.append(chunk)
assert(len(df) == 200)
assert(df['interval_real'].mean() == 11000714.0)
assert (len(df) == 200)
assert (df['interval_real'].mean() == 11000714.0)

@ -42,12 +42,14 @@ class PhantomConfig:
@staticmethod
def get_available_options():
opts = ["threads",
"phantom_modules_path",
"additional_libs",
"writelog",
"enum_ammo",
"timeout", ]
opts = [
"threads",
"phantom_modules_path",
"additional_libs",
"writelog",
"enum_ammo",
"timeout",
]
opts += StreamConfig.get_available_options()
return opts
@ -55,8 +57,8 @@ class PhantomConfig:
""" Read phantom tool specific options """
self.threads = self.get_option(
"threads", str(int(multiprocessing.cpu_count() / 2) + 1))
self.phantom_modules_path = self.get_option("phantom_modules_path",
"/usr/lib/phantom")
self.phantom_modules_path = self.get_option(
"phantom_modules_path", "/usr/lib/phantom")
self.additional_libs = self.get_option("additional_libs", "")
self.answ_log_level = self.get_option("writelog", "none")
if self.answ_log_level == '0':
@ -65,16 +67,17 @@ class PhantomConfig:
self.answ_log_level = 'all'
self.timeout = parse_duration(self.get_option("timeout", "11s"))
if self.timeout > 120000:
logger.warning("You've set timeout over 2 minutes."
" Are you a functional tester?")
logger.warning(
"You've set timeout over 2 minutes."
" Are you a functional tester?")
self.answ_log = self.core.mkstemp(".log", "answ_")
self.core.add_artifact_file(self.answ_log)
self.phout_file = self.core.get_option(self.SECTION, self.OPTION_PHOUT,
'')
self.phout_file = self.core.get_option(
self.SECTION, self.OPTION_PHOUT, '')
if not self.phout_file:
self.phout_file = self.core.mkstemp(".log", "phout_")
self.core.set_option(self.SECTION, self.OPTION_PHOUT,
self.phout_file)
self.core.set_option(
self.SECTION, self.OPTION_PHOUT, self.phout_file)
self.core.add_artifact_file(self.phout_file)
self.stat_log = self.core.mkstemp(".log", "phantom_stat_")
self.core.add_artifact_file(self.stat_log)
@ -82,14 +85,17 @@ class PhantomConfig:
self.core.add_artifact_file(self.phantom_log)
main_stream = StreamConfig(
self.core, len(self.streams), self.phout_file, self.answ_log,
self.core,
len(self.streams), self.phout_file, self.answ_log,
self.answ_log_level, self.timeout, self.SECTION)
self.streams.append(main_stream)
for section in self.core.config.find_sections(self.SECTION + '-'):
self.streams.append(StreamConfig(
self.core, len(self.streams), self.phout_file, self.answ_log,
self.answ_log_level, self.timeout, section))
self.streams.append(
StreamConfig(
self.core,
len(self.streams), self.phout_file, self.answ_log,
self.answ_log_level, self.timeout, section))
for stream in self.streams:
stream.read_config()
@ -175,8 +181,8 @@ class PhantomConfig:
result.ammo_file += stream.stepper_wrapper.ammo_file + ' '
result.ammo_count += stream.stepper_wrapper.ammo_count
result.duration = max(result.duration,
stream.stepper_wrapper.duration)
result.duration = max(
result.duration, stream.stepper_wrapper.duration)
result.instances += stream.instances
if not result.ammo_count:
@ -189,8 +195,8 @@ class StreamConfig:
OPTION_INSTANCES_LIMIT = 'instances'
def __init__(self, core, sequence, phout, answ, answ_level, timeout,
section):
def __init__(
self, core, sequence, phout, answ, answ_level, timeout, section):
self.core = core
self.address_wizard = AddressWizard()
@ -229,10 +235,14 @@ class StreamConfig:
@staticmethod
def get_available_options():
opts = ["ssl", "tank_type", 'gatling_ip', "method_prefix",
"source_log_prefix"]
opts += ["phantom_http_line", "phantom_http_field_num",
"phantom_http_field", "phantom_http_entity"]
opts = [
"ssl", "tank_type", 'gatling_ip', "method_prefix",
"source_log_prefix"
]
opts += [
"phantom_http_line", "phantom_http_field_num", "phantom_http_field",
"phantom_http_entity"
]
opts += ['address', "port", StreamConfig.OPTION_INSTANCES_LIMIT]
opts += StepperWrapper.get_available_options()
opts += ["connection_test"]
@ -245,16 +255,16 @@ class StreamConfig:
self.tank_type = self.get_option("tank_type", 'http')
# TODO: refactor. Maybe we should decide how to interact with
# StepperWrapper here.
self.instances = int(self.get_option(self.OPTION_INSTANCES_LIMIT,
'1000'))
self.instances = int(
self.get_option(self.OPTION_INSTANCES_LIMIT, '1000'))
self.gatling = ' '.join(self.get_option('gatling_ip', '').split("\n"))
self.method_prefix = self.get_option("method_prefix", 'method_stream')
self.method_options = self.get_option("method_options", '')
self.source_log_prefix = self.get_option("source_log_prefix", '')
self.phantom_http_line = self.get_option("phantom_http_line", "")
self.phantom_http_field_num = self.get_option("phantom_http_field_num",
"")
self.phantom_http_field_num = self.get_option(
"phantom_http_field_num", "")
self.phantom_http_field = self.get_option("phantom_http_field", "")
self.phantom_http_entity = self.get_option("phantom_http_entity", "")
@ -264,8 +274,8 @@ class StreamConfig:
self.ipv6, self.resolved_ip, self.port, self.address = self.address_wizard.resolve(
self.address, do_test_connect, explicit_port)
logger.info("Resolved %s into %s:%s", self.address, self.resolved_ip,
self.port)
logger.info(
"Resolved %s into %s:%s", self.address, self.resolved_ip, self.port)
self.client_cipher_suites = self.get_option("client_cipher_suites", "")
self.client_certificate = self.get_option("client_certificate", "")
@ -345,11 +355,12 @@ class StreamConfig:
fname = 'phantom_benchmark_main.tpl'
else:
fname = 'phantom_benchmark_additional.tpl'
template_str = resource_string(__name__,
"config/" + fname)
template_str = resource_string(
__name__, "config/" + fname)
tpl = string.Template(template_str)
config = tpl.substitute(kwargs)
return config
# ========================================================================

@ -53,8 +53,9 @@ class PhantomProgressBarWidget(AbstractInfoWidget):
elif self.ammo_progress:
left_part = self.ammo_count - self.ammo_progress
if left_part > 0:
eta_secs = int(float(dur_seconds) / float(self.ammo_progress) *
float(left_part))
eta_secs = int(
float(dur_seconds) / float(self.ammo_progress) *
float(left_part))
else:
eta_secs = 0
eta_time = datetime.timedelta(seconds=eta_secs)
@ -77,8 +78,8 @@ class PhantomProgressBarWidget(AbstractInfoWidget):
progress_chars += self.krutilka.next()
res += color_bg + progress_chars + screen.markup.RESET + color_fg
res += '~' * (pb_width - int(pb_width *
progress)) + screen.markup.RESET + ' '
res += '~' * (pb_width - int(pb_width * progress)
) + screen.markup.RESET + ' '
res += str_perc + "\n"
eta = 'ETA: %s' % eta_time
@ -123,17 +124,17 @@ class PhantomInfoWidget(AbstractInfoWidget):
info = self.owner.get_info()
if self.owner.phantom:
template = "Hosts: %s => %s:%s\n Ammo: %s\nCount: %s\n Load: %s"
data = (socket.gethostname(), info.address, info.port,
os.path.basename(info.ammo_file), self.ammo_count,
' '.join(info.rps_schedule))
data = (
socket.gethostname(), info.address, info.port,
os.path.basename(info.ammo_file), self.ammo_count,
' '.join(info.rps_schedule))
res = template % data
res += "\n\n"
res += "Active instances: "
if float(self.instances) / self.instances_limit > 0.8:
res += screen.markup.RED + str(
self.instances) + screen.markup.RESET
res += screen.markup.RED + str(self.instances) + screen.markup.RESET
elif float(self.instances) / self.instances_limit > 0.5:
res += screen.markup.YELLOW + str(
self.instances) + screen.markup.RESET
@ -141,8 +142,7 @@ class PhantomInfoWidget(AbstractInfoWidget):
res += str(self.instances)
res += "\nPlanned requests: %s for %s\nActual responses: " % (
self.planned,
datetime.timedelta(seconds=self.planned_rps_duration))
self.planned, datetime.timedelta(seconds=self.planned_rps_duration))
if not self.planned == self.RPS:
res += screen.markup.YELLOW + str(self.RPS) + screen.markup.RESET
else:
@ -150,22 +150,22 @@ class PhantomInfoWidget(AbstractInfoWidget):
res += "\n Accuracy: "
if self.selfload < 80:
res += screen.markup.RED + ('%.2f' %
self.selfload) + screen.markup.RESET
res += screen.markup.RED + (
'%.2f' % self.selfload) + screen.markup.RESET
elif self.selfload < 95:
res += screen.markup.YELLOW + ('%.2f' %
self.selfload) + screen.markup.RESET
res += screen.markup.YELLOW + (
'%.2f' % self.selfload) + screen.markup.RESET
else:
res += ('%.2f' % self.selfload)
res += "%\n Time lag: "
if self.time_lag > self.owner.buffered_seconds * 5:
logger.debug("Time lag: %s", self.time_lag)
res += screen.markup.RED + str(datetime.timedelta(
seconds=self.time_lag)) + screen.markup.RESET
res += screen.markup.RED + str(
datetime.timedelta(seconds=self.time_lag)) + screen.markup.RESET
elif self.time_lag > self.owner.buffered_seconds:
res += screen.markup.YELLOW + str(datetime.timedelta(
seconds=self.time_lag)) + screen.markup.RESET
res += screen.markup.YELLOW + str(
datetime.timedelta(seconds=self.time_lag)) + screen.markup.RESET
else:
res += str(datetime.timedelta(seconds=self.time_lag))
@ -175,6 +175,8 @@ class PhantomInfoWidget(AbstractInfoWidget):
self.RPS = data["overall"]["interval_real"]["len"]
self.planned = stats["metrics"]["reqps"]
self.instances = stats["metrics"]["instances"]
# TODO:
# self.selfload = second_aggregate_data.overall.selfload
# self.time_lag = int(time.time() - time.mktime(

@ -27,7 +27,8 @@ class Plugin(AbstractPlugin):
self.default_target = None
def _echo_wrapper(cmd):
return 'echo "====Executing: {cmd}"; {cmd}'.format(cmd=cmd)
return 'echo "====Executing: {cmd}"; {cmd}'.format(cmd=cmd)
cmds = {
"dpkg": "dpkg -l",
"uname": "uname -a",
@ -59,8 +60,7 @@ class Plugin(AbstractPlugin):
self.timeout = int(self.get_option("timeout", 3))
except:
logger.error(
'Exception trying to configure Platform plugin',
exc_info=True)
'Exception trying to configure Platform plugin', exc_info=True)
self.logfile = self.core.mkstemp(".log", "platform_")
self.core.add_artifact_file(self.logfile)
@ -84,8 +84,7 @@ class Plugin(AbstractPlugin):
out, errors, err_code = self.ssh.execute(self.cmd)
except Exception:
logger.warning(
"Failed to check remote system information at %s:%s",
host,
"Failed to check remote system information at %s:%s", host,
self.port)
logger.debug(
"Failed to check remote system information at %s:%s",

@ -28,8 +28,8 @@ class Plugin(AbstractPlugin):
return ["interval", "disk_limit", "mem_limit"]
def configure(self):
self.interval = expand_to_seconds(self.get_option(
"interval", self.interval))
self.interval = expand_to_seconds(
self.get_option("interval", self.interval))
self.disk_limit = int(self.get_option("disk_limit", self.disk_limit))
self.mem_limit = int(self.get_option("mem_limit", self.mem_limit))
@ -58,18 +58,20 @@ class Plugin(AbstractPlugin):
self.log.debug("No disk usage info: %s", res[2])
return
disk_free = res[1]
self.log.debug("Disk free space: %s/%s", disk_free.strip(),
self.disk_limit)
self.log.debug(
"Disk free space: %s/%s", disk_free.strip(), self.disk_limit)
if int(disk_free.strip()) < self.disk_limit:
raise RuntimeError(
"Not enough local resources: disk space less than %sMB in %s: %sMB" %
(self.disk_limit, self.core.artifacts_base_dir, int(
disk_free.strip())))
"Not enough local resources: disk space less than %sMB in %s: %sMB"
% (
self.disk_limit, self.core.artifacts_base_dir,
int(disk_free.strip())))
def __check_mem(self):
''' raise exception on RAM exceeded '''
mem_free = psutil.virtual_memory().available / 2**20
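# Editorial note: psutil reports available memory in bytes; 2**20
# converts it to megabytes for comparison with mem_limit.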
self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
if mem_free < self.mem_limit:
raise RuntimeError("Not enough resources: free memory less "
"than %sMB: %sMB" % (self.mem_limit, mem_free))
raise RuntimeError(
"Not enough resources: free memory less "
"than %sMB: %sMB" % (self.mem_limit, mem_free))

@ -47,13 +47,14 @@ class Plugin(AbstractPlugin):
def is_test_finished(self):
if self.poll:
self.log.info("Executing: %s", self.poll)
retcode = util.execute(self.poll,
shell=True,
poll_period=0.1,
catch_out=self.catch_out)[0]
retcode = util.execute(
self.poll,
shell=True,
poll_period=0.1,
catch_out=self.catch_out)[0]
if retcode:
self.log.warn("Non-zero exit code, interrupting test: %s",
retcode)
self.log.warn(
"Non-zero exit code, interrupting test: %s", retcode)
return retcode
return -1
@ -72,9 +73,7 @@ class Plugin(AbstractPlugin):
Execute and check exit code
'''
self.log.info("Executing: %s", cmd)
retcode = util.execute(cmd,
shell=True,
poll_period=0.1,
catch_out=self.catch_out)[0]
retcode = util.execute(
cmd, shell=True, poll_period=0.1, catch_out=self.catch_out)[0]
if retcode:
raise RuntimeError("Subprocess returned %s" % retcode)

@ -7,12 +7,18 @@ import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt # noqa:E402
_ALL_ = "All"
_CHARTSETS = {
"cpu-cpu-": {"CPU": _ALL_},
"net-": {"Network": {"bytes_sent", "bytes_recv"}},
"diskio-": {"Disk IO": {"read_bytes", "write_bytes"}, "Disk latency": {"read_time", "write_time"}},
"cpu-cpu-": {
"CPU": _ALL_
},
"net-": {
"Network": {"bytes_sent", "bytes_recv"}
},
"diskio-": {
"Disk IO": {"read_bytes", "write_bytes"},
"Disk latency": {"read_time", "write_time"}
},
}
_CUSTOM_PREFIX = "custom:"
_REPORT_FILE_OPTION = "report_file"
@ -33,7 +39,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
def configure(self):
self.__report_path = self.get_option(_REPORT_FILE_OPTION, "report.svg")
if os.path.split(self.__report_path)[0] or os.path.splitdrive(self.__report_path)[0]:
if os.path.split(self.__report_path)[0] or os.path.splitdrive(
self.__report_path)[0]:
raise Exception("Only simple file names supported")
self.__shooting_data = []
@ -50,7 +57,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
def post_process(self, retcode):
monitoring_chartsets = self.__get_monitoring_chartsets()
min_x = self.__shooting_data[0]["ts"] # sync start of shooting and start of monitoring
min_x = self.__shooting_data[0][
"ts"] # sync start of shooting and start of monitoring
seaborn.set(style="whitegrid", palette="Set2")
seaborn.despine()
@ -67,7 +75,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
plt.gca().legend(fontsize="x-small")
# monitoring
for plot_num, chartset_data in enumerate(sorted(monitoring_chartsets.iteritems()), 1):
for plot_num, chartset_data in enumerate(
sorted(monitoring_chartsets.iteritems()), 1):
chartset_title, signals = chartset_data
plt.subplot(plot_count, 1, plot_num + 1)
@ -96,9 +105,12 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
for chartset_prefix, chartset_data in _CHARTSETS.iteritems():
if signal_prefix.startswith(chartset_prefix):
for chartset_title, chartset_signals in chartset_data.iteritems():
for chartset_title, chartset_signals in chartset_data.iteritems(
):
if chartset_signals is _ALL_ or signal_suffix in chartset_signals:
return "{} {}".format(chartset_title, signal_prefix[len(chartset_prefix):])
return "{} {}".format(
chartset_title,
signal_prefix[len(chartset_prefix):])
else:
return None
else:
@ -115,11 +127,13 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
continue
signal_prefix, signal_suffix = signal_name.split("_", 1)
chartset_title = self.__find_monitoring_chartset(signal_prefix, signal_suffix)
chartset_title = self.__find_monitoring_chartset(
signal_prefix, signal_suffix)
if not chartset_title:
continue
chartsets.setdefault((chartset_title), set()).add((signal_name, signal_suffix))
chartsets.setdefault((chartset_title), set()).add(
(signal_name, signal_suffix))
return chartsets
@ -142,7 +156,8 @@ class Plugin(AbstractPlugin, AggregateResultListener, MonitoringDataListener):
y = {}
for data in self.__shooting_data:
timestamp = data["ts"]
for variant, count in data["overall"][signal_name]["count"].iteritems():
for variant, count in data["overall"][signal_name][
"count"].iteritems():
x.setdefault(variant, []).append(timestamp - min_x)
y.setdefault(variant, []).append(count)
return x, y

@ -13,7 +13,6 @@ import time
from optparse import OptionParser
import Queue as q
logger = logging.getLogger("agent")
collector_logger = logging.getLogger("telegraf")
@ -82,34 +81,35 @@ class Consolidator(object):
if data['name'] == 'diskio':
data['name'] = "{metric_name}-{disk_id}".format(
metric_name=data['name'],
disk_id=data['tags']['name']
)
disk_id=data['tags']['name'])
elif data['name'] == 'net':
data['name'] = "{metric_name}-{interface}".format(
metric_name=data['name'], interface=data['tags']['interface'])
data[
'name'] = "{metric_name}-{interface}".format(
metric_name=data['name'],
interface=data['tags']['interface'])
elif data['name'] == 'cpu':
data['name'] = "{metric_name}-{cpu_id}".format(
metric_name=data['name'],
cpu_id=data['tags']['cpu']
)
cpu_id=data['tags']['cpu'])
key = data['name'] + "_" + key
if key.endswith('_exec_value'):
key = key.replace('_exec_value', '')
self.results[ts][key] = value
except KeyError:
logger.error(
'Malformed json from source: %s', chunk, exc_info=True)
'Malformed json from source: %s',
chunk,
exc_info=True)
except:
logger.error(
'Something nasty happened in consolidator work',
exc_info=True)
if len(self.results) > 5:
ready_to_go_index = min(self.results)
yield json.dumps(
{
ready_to_go_index: self.results.pop(ready_to_go_index, None)
}
)
yield json.dumps({
ready_to_go_index:
self.results.pop(ready_to_go_index, None)
})
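# Editorial note: samples are buffered per timestamp and flushed
# oldest-first once more than five timestamps accumulate, which
# tolerates telegraf emitting metrics for the same second out of order.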
class Drain(threading.Thread):
@ -139,7 +139,6 @@ class Drain(threading.Thread):
class AgentWorker(threading.Thread):
def __init__(self, telegraf_path):
super(AgentWorker, self).__init__()
self.working_dir = os.path.dirname(__file__)
@ -167,8 +166,7 @@ class AgentWorker(threading.Thread):
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdin=subprocess.PIPE, )
def read_startup_config(self, cfg_file='agent_startup.cfg'):
try:
@ -188,12 +186,10 @@ class AgentWorker(threading.Thread):
logger.info(
'Successfully loaded startup config.\n'
'Startups: %s\n'
'Shutdowns: %s\n', self.startups, self.shutdowns
)
'Shutdowns: %s\n', self.startups, self.shutdowns)
except:
logger.error(
'Error trying to read agent startup config',
exc_info=True)
'Error trying to read agent startup config', exc_info=True)
def run(self):
logger.info("Running startup commands")
@ -204,9 +200,7 @@ class AgentWorker(threading.Thread):
logger.info('Starting metrics collector..')
cmnd = "{telegraf} -config {working_dir}/agent.cfg".format(
telegraf=self.telegraf_path,
working_dir=self.working_dir
)
telegraf=self.telegraf_path, working_dir=self.working_dir)
self.collector = self.popen(cmnd)
telegraf_output = self.working_dir + '/monitoring.rawdata'
@ -218,23 +212,17 @@ class AgentWorker(threading.Thread):
time.sleep(1)
self.drain = Drain(
Consolidator(
DataReader(telegraf_output)
),
self.results
)
Consolidator(DataReader(telegraf_output)), self.results)
self.drain.start()
self.drain_stdout = Drain(
DataReader(self.collector.stdout, pipe=True),
self.results_stdout
)
DataReader(
self.collector.stdout, pipe=True), self.results_stdout)
self.drain_stdout.start()
self.drain_err = Drain(
DataReader(self.collector.stderr, pipe=True),
self.results_err
)
DataReader(
self.collector.stderr, pipe=True), self.results_err)
self.drain_err.start()
while not self.finished:
@ -243,9 +231,7 @@ class AgentWorker(threading.Thread):
data = self.results.get_nowait()
logger.debug(
'send %s bytes of data to collector', len(data))
sys.stdout.write(
str(data) + '\n'
)
sys.stdout.write(str(data) + '\n')
except q.Empty:
break
except:
@ -297,16 +283,21 @@ class AgentWorker(threading.Thread):
def main():
fname = os.path.dirname(__file__) + "/_agent.log"
logging.basicConfig(
level=logging.DEBUG, filename=fname,
level=logging.DEBUG,
filename=fname,
format='%(asctime)s [%(levelname)s] %(name)s:%(lineno)d %(message)s')
parser = OptionParser()
parser.add_option(
"", "--telegraf", dest="telegraf_path",
"",
"--telegraf",
dest="telegraf_path",
help="telegraf_path",
default="/tmp/telegraf")
parser.add_option(
"", "--host", dest="hostname_path",
"",
"--host",
dest="hostname_path",
help="telegraf_path",
default="/usr/bin/telegraf")
(options, args) = parser.parse_args()
@ -315,24 +306,24 @@ def main():
customs_script = os.path.dirname(__file__) + '/agent_customs.sh'
try:
logger.info(
'Trying to make telegraf executable: %s',
options.telegraf_path)
'Trying to make telegraf executable: %s', options.telegraf_path)
# 0o755 compatible with old python versions. 744 is NOT enough
os.chmod(options.telegraf_path, 493)
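# (editorial: 493 decimal == 0o755 octal, i.e. rwxr-xr-x)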
except OSError:
logger.warning(
'Unable to set %s access rights to execute.',
options.telegraf_path, exc_info=True)
options.telegraf_path,
exc_info=True)
try:
logger.info(
'Trying to make customs script executable: %s',
customs_script)
'Trying to make customs script executable: %s', customs_script)
# 0o755 compatible with old python versions. 744 is NOT enough
os.chmod(customs_script, 493)
except OSError:
logger.warning(
'Unable to set %s access rights to execute.',
customs_script, exc_info=True)
customs_script,
exc_info=True)
worker = AgentWorker(options.telegraf_path)
worker.read_startup_config()

@ -58,10 +58,8 @@ class LocalhostClient(object):
customs_script = self.config.create_custom_exec_script()
try:
copyfile(
self.path['AGENT_LOCAL_FOLDER'] +
'/agent.py',
self.workdir +
'/agent.py')
self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
self.workdir + '/agent.py')
copyfile(agent_config, self.workdir + '/agent.cfg')
copyfile(startup_config, self.workdir + '/agent_startup.cfg')
copyfile(customs_script, self.workdir + '/agent_customs.sh')
@ -90,8 +88,7 @@ class LocalhostClient(object):
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdin=subprocess.PIPE, )
def start(self):
"""Start local agent"""
@ -102,9 +99,7 @@ class LocalhostClient(object):
telegraf_path=self.path['TELEGRAF_LOCAL_PATH'],
host=self.host)
self.session = self.popen(command)
self.reader_thread = threading.Thread(
target=self.read_buffer
)
self.reader_thread = threading.Thread(target=self.read_buffer)
self.reader_thread.setDaemon(True)
return self.session
@ -123,8 +118,8 @@ class LocalhostClient(object):
except ValueError:
logger.debug(
'this exc is most likely raised during interpreter shutdown\n'
'otherwise something really nasty happened', exc_info=True
)
'otherwise something really nasty happened',
exc_info=True)
def uninstall(self):
"""
@ -182,8 +177,8 @@ class SSHClient(object):
def install(self):
"""Create folder and copy agent and metrics scripts to remote host"""
logger.info("Installing monitoring agent at %s@%s...", self.username,
self.host)
logger.info(
"Installing monitoring agent at %s@%s...", self.username, self.host)
# create remote temp dir
cmd = self.python + ' -c "import tempfile; print tempfile.mkdtemp();"'
@ -210,9 +205,7 @@ class SSHClient(object):
if remote_dir:
self.path['AGENT_REMOTE_FOLDER'] = remote_dir
logger.debug(
"Remote dir at %s:%s",
self.host,
self.path['AGENT_REMOTE_FOLDER'])
"Remote dir at %s:%s", self.host, self.path['AGENT_REMOTE_FOLDER'])
# create collector config
agent_config = self.config.create_collector_config(
@ -225,9 +218,7 @@ class SSHClient(object):
# support string formatting without indices
remote_cmd = 'import os; print os.path.isfile("' + self.path[
'TELEGRAF_REMOTE_PATH'] + '")'
cmd = self.python + ' -c \'{cmd}\''.format(
cmd=remote_cmd
)
cmd = self.python + ' -c \'{cmd}\''.format(cmd=remote_cmd)
remote_telegraf_exists = "False"
try:
out, err, err_code = self.ssh.execute(cmd)
@ -255,37 +246,30 @@ class SSHClient(object):
self.path['TELEGRAF_REMOTE_PATH'])
elif os.path.isfile("/usr/bin/telegraf"):
self.ssh.send_file(
'/usr/bin/telegraf', self.path['TELEGRAF_REMOTE_PATH']
)
'/usr/bin/telegraf', self.path['TELEGRAF_REMOTE_PATH'])
else:
logger.error(
'Telegraf binary not found neither on %s nor on localhost at specified path: %s\n'
'You can download telegraf binaries here: https://github.com/influxdata/telegraf\n'
'or install debian package: `telegraf`', self.host, self.path['TELEGRAF_LOCAL_PATH'])
'or install debian package: `telegraf`', self.host,
self.path['TELEGRAF_LOCAL_PATH'])
return None, None, None
self.ssh.send_file(
self.path['AGENT_LOCAL_FOLDER'] + '/agent.py',
self.path['AGENT_REMOTE_FOLDER'] + '/agent.py'
)
self.path['AGENT_REMOTE_FOLDER'] + '/agent.py')
self.ssh.send_file(
agent_config,
self.path['AGENT_REMOTE_FOLDER'] +
'/agent.cfg')
agent_config, self.path['AGENT_REMOTE_FOLDER'] + '/agent.cfg')
self.ssh.send_file(
startup_config,
self.path['AGENT_REMOTE_FOLDER'] +
'/agent_startup.cfg')
self.path['AGENT_REMOTE_FOLDER'] + '/agent_startup.cfg')
self.ssh.send_file(
customs_script,
self.path['AGENT_REMOTE_FOLDER'] +
'/agent_customs.sh')
self.path['AGENT_REMOTE_FOLDER'] + '/agent_customs.sh')
except Exception:
logger.error(
"Failed to install agent on %s",
self.host,
exc_info=True)
"Failed to install agent on %s", self.host, exc_info=True)
return None, None, None
return agent_config, startup_config, customs_script
@ -300,9 +284,7 @@ class SSHClient(object):
host=self.host)
logging.debug('Command to start agent: %s', command)
self.session = self.ssh.async_session(command)
self.reader_thread = threading.Thread(
target=self.read_buffer
)
self.reader_thread = threading.Thread(target=self.read_buffer)
self.reader_thread.setDaemon(True)
return self.session
@ -339,12 +321,9 @@ class SSHClient(object):
exc_info=True)
try:
self.ssh.get_file(
self.path['AGENT_REMOTE_FOLDER'] +
"/_agent.log",
log_filename)
self.path['AGENT_REMOTE_FOLDER'] + "/_agent.log", log_filename)
self.ssh.get_file(
self.path['AGENT_REMOTE_FOLDER'] +
"/monitoring.rawdata",
self.path['AGENT_REMOTE_FOLDER'] + "/monitoring.rawdata",
data_filename)
self.ssh.rm_r(self.path['AGENT_REMOTE_FOLDER'])
except Exception:

@ -38,10 +38,7 @@ class MonitoringCollector(object):
self.load_start_time = None
self.config_manager = ConfigManager()
self.old_style_configs = False
self.clients = {
'localhost': LocalhostClient,
'ssh': SSHClient
}
self.clients = {'localhost': LocalhostClient, 'ssh': SSHClient}
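# Editorial note: the transport is chosen per host: LocalhostClient
# copies the agent files directly, while SSHClient ships them to the
# remote machine over ssh.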
def add_listener(self, obj):
self.listeners.append(obj)
@ -103,8 +100,9 @@ class MonitoringCollector(object):
}
self.send_data.append(ready_to_send)
logger.debug('Polling/decoding agents data took: %.2fms',
(time.time() - start_time) * 1000)
logger.debug(
'Polling/decoding agents data took: %.2fms',
(time.time() - start_time) * 1000)
collected_data_length = len(self.send_data)
@ -132,8 +130,10 @@ class MonitoringCollector(object):
def send_collected_data(self):
"""sends pending data set to listeners"""
[listener.monitoring_data(self.send_data)
for listener in self.listeners]
[
listener.monitoring_data(self.send_data)
for listener in self.listeners
]
self.send_data = []

@ -50,20 +50,22 @@ class ConfigManager(object):
},
"Memory": {
"name": '[inputs.mem]',
"fielddrop": '["active", "inactive", "total", "used_per*", "avail*"]',
"fielddrop":
'["active", "inactive", "total", "used_per*", "avail*"]',
},
"Disk": {
"name": '[inputs.diskio]',
"devices": '[{devices}]'.format(
devices=",".join(['"vda%s","sda%s"' % (num, num) for num in range(6)])
),
devices=",".join(
['"vda%s","sda%s"' % (num, num) for num in range(6)])),
},
"Net": {
"name": '[inputs.net]',
"interfaces": '[{interfaces}]'.format(
interfaces=",".join(['"eth%s"' % (num) for num in range(6)])
),
"fielddrop": '["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]',
interfaces=",".join(
['"eth%s"' % (num) for num in range(6)])),
"fielddrop":
'["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]',
},
"Nstat": {
"name": '[inputs.nstat]',
@ -89,17 +91,15 @@ class ConfigManager(object):
}
defaults_enabled = ['CPU', 'Memory', 'Disk', 'Net', 'System', 'Kernel']
defaults_boolean = [
'percpu',
'round_interval',
'fielddrop',
'fieldpass',
'interfaces',
'devices']
'percpu', 'round_interval', 'fielddrop', 'fieldpass', 'interfaces',
'devices'
]
hostname = host.get('address').lower()
if hostname == '[target]':
if not target_hint:
raise ValueError(
"Can't use `[target]` keyword with no target parameter specified")
"Can't use `[target]` keyword with no target parameter specified"
)
logger.debug("Using target hint: %s", target_hint)
hostname = target_hint.lower()
custom = []
@ -113,14 +113,12 @@ class ConfigManager(object):
if key != 'name' and key not in defaults_boolean:
value = metric.get(key, None)
if value:
defaults[
metric.tag][key] = "'{value}'".format(
defaults[metric.tag][key] = "'{value}'".format(
value=value)
elif key in defaults_boolean:
value = metric.get(key, None)
if value:
defaults[
metric.tag][key] = "{value}".format(
defaults[metric.tag][key] = "{value}".format(
value=value)
host_config[metric.tag] = defaults[metric.tag]
# custom metrics
@ -186,11 +184,15 @@ class AgentConfig(object):
# FIXME incinerate such a string formatting inside a method call
# T_T
config.add_section('startup')
[config.set('startup', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.startups)]
[
config.set('startup', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.startups)
]
config.add_section('shutdown')
[config.set('shutdown', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.shutdowns)]
[
config.set('shutdown', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.shutdowns)
]
with open(cfg_path, 'w') as fds:
config.write(fds)
@ -210,16 +212,14 @@ class AgentConfig(object):
if os.path.isfile(cfg_path):
logger.info(
'Found agent custom execs config file in working directory with the same name as created for host %s.\n'
'Creating new one via tempfile. This will affect predictable filenames for agent artefacts', self.host)
'Creating new one via tempfile. This will affect predictable filenames for agent artefacts',
self.host)
handle, cfg_path = tempfile.mkstemp('.sh', 'agent_customs_')
os.close(handle)
cmds = ""
for idx, cmd in enumerate(self.custom):
cmds += "-{idx}) {cmd};;\n".format(
idx=idx,
cmd=cmd['cmd']
)
cmds += "-{idx}) {cmd};;\n".format(idx=idx, cmd=cmd['cmd'])
customs_script = """
#!/bin/sh
while :
@ -263,8 +263,7 @@ class AgentConfig(object):
config.set(
"agent",
"interval",
"'{interval}s'".format(
interval=self.interval))
"'{interval}s'".format(interval=self.interval))
config.set("agent", "round_interval", "true")
config.set("agent", "flush_interval", "'1s'")
config.set("agent", "collection_jitter", "'0s'")
@ -280,9 +279,10 @@ class AgentConfig(object):
if key != 'name':
config.set(
"{section_name}".format(
section_name=self.host_config[section]['name']), "{key}".format(
key=key), "{value}".format(
value=value))
section_name=self.host_config[section][
'name']),
"{key}".format(key=key),
"{value}".format(value=value))
# monitoring-style config
else:
if section in defaults_old_enabled:
@ -291,23 +291,22 @@ class AgentConfig(object):
section_name=self.host_config[section]['name']))
for key, value in iteritems(self.host_config[section]):
if key in [
'fielddrop',
'fieldpass',
'percpu',
'devices',
'interfaces']:
'fielddrop', 'fieldpass', 'percpu',
'devices', 'interfaces'
]:
config.set(
"{section_name}".format(
section_name=self.host_config[section]['name']), "{key}".format(
key=key), "{value}".format(
value=value))
section_name=self.host_config[section][
'name']),
"{key}".format(key=key),
"{value}".format(value=value))
# outputs
config.add_section("[outputs.file]")
config.set("[outputs.file]",
"files",
"['{config}']".format(
config=self.monitoring_data_output))
config.set(
"[outputs.file]",
"files",
"['{config}']".format(config=self.monitoring_data_output))
config.set("[outputs.file]", "data_format", "'json'")
with open(cfg_path, 'w') as fds:

@ -6,7 +6,6 @@ logger = logging.getLogger(__name__)
class MetricsDecoder(object):
def __init__(self):
"""
translates telegraf metric names into common Monitoring metric names
@ -49,32 +48,22 @@ class MetricsDecoder(object):
self.diff_metrics = {
'cpu': [],
'mem': [],
'net': [
'packets_recv',
'packets_sent',
'bytes_recv',
'bytes_sent'],
'net': ['packets_recv', 'packets_sent', 'bytes_recv', 'bytes_sent'],
'nstat': ['TcpRetransSegs'],
'net_response': [],
'kernel': [
'context_switches',
'interrupts',
'processes_forked'],
'kernel': ['context_switches', 'interrupts', 'processes_forked'],
'diskio': [
'read_bytes',
'write_bytes',
'io_time',
'read_time',
'reads',
'write_time',
'writes'],
'custom': []}
'read_bytes', 'write_bytes', 'io_time', 'read_time', 'reads',
'write_time', 'writes'
],
'custom': []
}
def find_common_names(self, key):
if key in self.known_metrics:
return self.known_metrics[key]
else:
return 'custom:{}'. format(key)
return 'custom:{}'.format(key)
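# Editorial usage sketch: a key present in known_metrics (elided above)
# maps to its common monitoring name, anything else is namespaced, e.g.
# find_common_names('some_exec') == 'custom:some_exec'.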
decoder = MetricsDecoder()

@ -24,7 +24,6 @@ if sys.version_info[0] < 3:
else:
from configparser import NoOptionError
logger = logging.getLogger(__name__)
@ -54,8 +53,7 @@ class Plugin(AbstractPlugin):
if self.monitoring:
self.monitoring.load_start_time = time.time()
logger.debug(
"load_start_time = %s",
self.monitoring.load_start_time)
"load_start_time = %s", self.monitoring.load_start_time)
def get_available_options(self):
return ["config", "default_target", "ssh_timeout"]
@ -77,8 +75,9 @@ class Plugin(AbstractPlugin):
is_monitoring = None
if is_telegraf and is_monitoring:
raise ValueError('Both telegraf and monitoring configs specified. '
'Clean up your config and delete one of them')
raise ValueError(
'Both telegraf and monitoring configs specified. '
'Clean up your config and delete one of them')
if is_telegraf and not is_monitoring:
return 'telegraf'
if not is_telegraf and is_monitoring:
@ -111,8 +110,7 @@ class Plugin(AbstractPlugin):
self.detected_conf = self.__detect_configuration()
if self.detected_conf:
logging.info(
'Detected monitoring configuration: %s',
self.detected_conf)
'Detected monitoring configuration: %s', self.detected_conf)
self.SECTION = self.detected_conf
self.config = self.get_option("config", "auto").strip()
self.default_target = self.get_option("default_target", "localhost")
@ -133,16 +131,21 @@ class Plugin(AbstractPlugin):
else:
if self.config.lower() == "auto":
self.die_on_fail = False
with open(resource.resource_filename(self.default_config), 'rb') as def_config:
with open(
resource.resource_filename(self.default_config),
'rb') as def_config:
config_contents = def_config.read()
else:
with open(resource.resource_filename(self.config), 'rb') as config:
with open(resource.resource_filename(self.config),
'rb') as config:
config_contents = config.read()
# dump config contents into a file
xmlfile = self.core.mkstemp(".xml", "monitoring_")
self.core.add_artifact_file(xmlfile)
with open(xmlfile, "wb") as f: # output file should be in binary mode to support py3
with open(
xmlfile, "wb"
) as f: # output file should be in binary mode to support py3
f.write(config_contents)
self.config = xmlfile
@ -169,8 +172,7 @@ class Plugin(AbstractPlugin):
if info:
self.default_target = info.address
logger.debug(
"Changed monitoring target to %s",
self.default_target)
"Changed monitoring target to %s", self.default_target)
self.monitoring.config = self.config
if self.default_target:
@ -325,11 +327,11 @@ class MonitoringWidget(AbstractInfoWidget, MonitoringDataListener):
res = "Monitoring is " + screen.markup.GREEN + \
"online" + screen.markup.RESET + ":\n"
for hostname, metrics in self.data.items():
tm_stamp = datetime.datetime.fromtimestamp(float(self.time[
hostname])).strftime('%H:%M:%S')
tm_stamp = datetime.datetime.fromtimestamp(
float(self.time[hostname])).strftime('%H:%M:%S')
res += (
" " + screen.markup.CYAN + "%s" +
screen.markup.RESET + " at %s:\n") % (hostname, tm_stamp)
" " + screen.markup.CYAN + "%s" + screen.markup.RESET +
" at %s:\n") % (hostname, tm_stamp)
for metric, value in sorted(metrics.iteritems()):
if self.sign[hostname][metric] > 0:
value = screen.markup.YELLOW + value + screen.markup.RESET
@ -383,9 +385,10 @@ class AbstractMetricCriterion(AbstractCriterion, MonitoringDataListener):
if self.metric not in data.keys() or not data[self.metric]:
data[self.metric] = 0
logger.debug("Compare %s %s/%s=%s to %s", self.get_type_string(),
host, self.metric, data[self.metric],
self.value_limit)
logger.debug(
"Compare %s %s/%s=%s to %s",
self.get_type_string(), host, self.metric, data[self.metric],
self.value_limit)
if self.comparison_fn(float(data[self.metric]), self.value_limit):
if not self.seconds_count:
self.cause_second = self.last_second
@ -430,8 +433,9 @@ class MetricHigherCriterion(AbstractMetricCriterion):
return "%s/%s metric value is higher than %s for %s seconds" % items
def widget_explain(self):
items = (self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
items = (
self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
return "%s/%s > %s for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit
@ -457,8 +461,9 @@ class MetricLowerCriterion(AbstractMetricCriterion):
return "%s/%s metric value is lower than %s for %s seconds" % items
def widget_explain(self):
items = (self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
items = (
self.host, self.metric, self.value_limit, self.seconds_count,
self.seconds_limit)
return "%s/%s < %s for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit
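
Note: both criteria above share the counting logic of AbstractMetricCriterion, roughly: each per-second sample is compared against the limit with comparison_fn, and the criterion fires once the comparison has held for seconds_limit consecutive seconds. A minimal self-contained sketch of that windowed check (MetricWindow and feed are illustrative names, not the plugin's API):

import operator

class MetricWindow(object):
    # comparison_fn is operator.gt for "higher", operator.lt for "lower"
    def __init__(self, comparison_fn, value_limit, seconds_limit):
        self.comparison_fn = comparison_fn
        self.value_limit = value_limit
        self.seconds_limit = seconds_limit
        self.seconds_count = 0

    def feed(self, value):
        # Feed one per-second sample; return True once the limit has
        # been exceeded for seconds_limit consecutive seconds.
        if self.comparison_fn(float(value), self.value_limit):
            self.seconds_count += 1
        else:
            self.seconds_count = 0
        return self.seconds_count >= self.seconds_limit

window = MetricWindow(operator.gt, 90.0, 3)  # "CPU above 90% for 3s"
assert [window.feed(v) for v in (95, 96, 50, 95, 96, 97)] == \
    [False, False, False, False, False, True]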


@ -7,12 +7,10 @@ import json
from ..Telegraf.decoder import decoder
logger = logging.getLogger(__name__)
class MonitoringReader(object):
def __init__(self, source):
self.buffer = []
self.source = source
@ -43,11 +41,12 @@ class MonitoringReader(object):
# key_group sample: diskio
# key_name sample: io_time
try:
key_group, key_name = key.split('_')[0].split(
'-')[0], '_'.join(key.split('_')[1:])
key_group, key_name = key.split('_')[
0].split('-')[0], '_'.join(
key.split('_')[1:])
except:
key_group, key_name = key.split(
'_')[0], '_'.join(key.split('_')[1:])
key_group, key_name = key.split('_')[
0], '_'.join(key.split('_')[1:])
if key_group in decoder.diff_metrics.keys():
if key_name in decoder.diff_metrics[
key_group]:
@ -60,7 +59,10 @@ class MonitoringReader(object):
except KeyError:
logger.debug(
'There is no diff value for metric %s.\n'
'Timestamp: %s. Is it initial data?', key, ts, exc_info=True)
'Timestamp: %s. Is it initial data?',
key,
ts,
exc_info=True)
value = 0
prepared_results[
decoded_key] = value
@ -69,8 +71,7 @@ class MonitoringReader(object):
key)
prepared_results[decoded_key] = value
else:
decoded_key = decoder.find_common_names(
key)
decoded_key = decoder.find_common_names(key)
prepared_results[decoded_key] = value
self.prev_check = jsn[ts]
collect.append((ts, prepared_results))
@ -78,8 +79,7 @@ class MonitoringReader(object):
logger.error(
'Telegraf agent sent trash to output: %s', chunk)
logger.debug(
'Telegraf agent data block w/ trash: %s',
exc_info=True)
'Telegraf agent data block w/ trash: %s', exc_info=True)
return []
except:
logger.error(
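
Note: the key-splitting logic above turns a raw telegraf key into a (group, name) pair, stripping any '-device' suffix from the group. A standalone restatement of that split (split_key is an illustrative helper, not part of the reader):

def split_key(key):
    # part before the first '_' (minus any '-device' suffix) is the
    # metric group; the rest is the metric name
    key_group = key.split('_')[0].split('-')[0]
    key_name = '_'.join(key.split('_')[1:])
    return key_group, key_name

assert split_key('diskio-sda1_io_time') == ('diskio', 'io_time')
assert split_key('cpu-cpu0_usage_user') == ('cpu', 'usage_user')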


@ -7,7 +7,6 @@ else:
class TestConfigManager(object):
def test_rawxml_parse(self):
""" raw xml read from string """
manager = ConfigManager()
@ -29,8 +28,7 @@ class TestConfigManager(object):
'yandextank/plugins/Telegraf/tests/old_mon.xml', 'sometargethint')
assert (
configs[0]['host'] == 'somehost.yandex.tld' and
configs[0]['host_config']['CPU']['name'] == '[inputs.cpu]'
)
configs[0]['host_config']['CPU']['name'] == '[inputs.cpu]')
def test_xml_telegraf_parse(self):
""" telegraf-style monitoring xml parse """
@ -40,8 +38,7 @@ class TestConfigManager(object):
'sometargethint')
assert (
configs[0]['host'] == 'somehost.yandex.tld' and
configs[0]['host_config']['CPU']['name'] == '[inputs.cpu]'
)
configs[0]['host_config']['CPU']['name'] == '[inputs.cpu]')
def test_target_hint(self):
""" test target hint (special address=[target] option) """
@ -53,7 +50,6 @@ class TestConfigManager(object):
class TestAgentConfig(object):
def test_create_startup_configs(self):
""" test agent config creates startup config """
manager = ConfigManager()
@ -81,9 +77,8 @@ class TestAgentConfig(object):
cfg_parser.has_section('agent') and
cfg_parser.get('agent', 'interval') == "'1s'" and
cfg_parser.has_section('[outputs.file') and
cfg_parser.get(
'[outputs.file', 'files') == "['{rmt}/monitoring.rawdata']".format(rmt=remote_workdir)
)
cfg_parser.get('[outputs.file', 'files') ==
"['{rmt}/monitoring.rawdata']".format(rmt=remote_workdir))
def test_create_custom_exec_script(self):
""" test agent config creates custom_exec config """


@ -3,14 +3,12 @@ from yandextank.plugins.Telegraf import Plugin as TelegrafPlugin
class TestTelegrafPlugin(object):
def test_plugin_configuration(self):
""" testing telegraf plugin configuration """
core = TankCore()
telegraf_plugin = TelegrafPlugin(core)
core.set_option(
'telegraf',
'config',
'telegraf', 'config',
'yandextank/plugins/Telegraf/tests/telegraf_mon.xml')
telegraf_plugin.configure()
assert telegraf_plugin.detected_conf == 'telegraf'
@ -20,8 +18,7 @@ class TestTelegrafPlugin(object):
core = TankCore()
telegraf_plugin = TelegrafPlugin(core)
core.set_option(
'monitoring',
'config',
'monitoring', 'config',
'yandextank/plugins/Telegraf/tests/old_mon.xml')
telegraf_plugin.configure()
assert telegraf_plugin.detected_conf == 'monitoring'
@ -31,12 +28,10 @@ class TestTelegrafPlugin(object):
core = TankCore()
telegraf_plugin = TelegrafPlugin(core)
core.set_option(
'monitoring',
'config',
'monitoring', 'config',
'yandextank/plugins/Telegraf/tests/old_mon.xml')
core.set_option(
'telegraf',
'config',
'telegraf', 'config',
'yandextank/plugins/Telegraf/tests/telegraf_mon.xml')
try:
telegraf_plugin.configure()


@ -1 +1 @@
from plugin import * # noqa:F401,F403
from plugin import * # noqa:F401,F403


@ -19,8 +19,10 @@ class Plugin(AbstractPlugin, AbstractInfoWidget):
def __init__(self, core):
AbstractPlugin.__init__(self, core)
AbstractInfoWidget.__init__(self)
self.lines = [l.decode(
'utf-8') for l in resource_stream(__name__, "config/tips.txt").readlines()]
self.lines = [
l.decode('utf-8')
for l in resource_stream(__name__, "config/tips.txt").readlines()
]
self.disable = 0
line = random.choice(self.lines)
@ -60,6 +62,6 @@ class Plugin(AbstractPlugin, AbstractInfoWidget):
self.probability += 1e-3
line = screen.markup.WHITE + "Tips & Tricks => " + \
self.section + screen.markup.RESET + ":\n "
line += "\n ".join(textwrap.wrap(self.tip, screen.right_panel_width -
2))
line += "\n ".join(
textwrap.wrap(self.tip, screen.right_panel_width - 2))
return line


@ -11,21 +11,21 @@ from .module_exceptions import StepperConfigurationError, AmmoFileError
class ComponentFactory():
def __init__(self,
rps_schedule=None,
http_ver='1.1',
ammo_file=None,
instances_schedule=None,
instances=1000,
loop_limit=-1,
ammo_limit=-1,
uris=None,
headers=None,
autocases=None,
enum_ammo=False,
ammo_type='phantom',
chosen_cases=[], ):
def __init__(
self,
rps_schedule=None,
http_ver='1.1',
ammo_file=None,
instances_schedule=None,
instances=1000,
loop_limit=-1,
ammo_limit=-1,
uris=None,
headers=None,
autocases=None,
enum_ammo=False,
ammo_type='phantom',
chosen_cases=[], ):
self.log = logging.getLogger(__name__)
self.ammo_file = ammo_file
self.ammo_type = ammo_type
@ -57,7 +57,8 @@ class ComponentFactory():
"""
if self.rps_schedule and self.instances_schedule:
raise StepperConfigurationError(
'Both rps and instances schedules specified. You must specify only one of them')
'Both rps and instances schedules specified. You must specify only one of them'
)
elif self.rps_schedule:
info.status.publish('loadscheme', self.rps_schedule)
return lp.create(self.rps_schedule)
@ -84,11 +85,11 @@ class ComponentFactory():
}
if self.uris and self.ammo_file:
raise StepperConfigurationError(
'Both uris and ammo file specified. You must specify only one of them')
'Both uris and ammo file specified. You must specify only one of them'
)
elif self.uris:
ammo_gen = missile.UriStyleGenerator(self.uris,
self.headers,
http_ver=self.http_ver)
ammo_gen = missile.UriStyleGenerator(
self.uris, self.headers, http_ver=self.http_ver)
elif self.ammo_file:
if self.ammo_type in af_readers:
if self.ammo_type == 'phantom':
@ -98,10 +99,12 @@ class ComponentFactory():
if not ammo.next()[0].isdigit():
self.ammo_type = 'uri'
self.log.info(
"Setting ammo_type 'uri' because ammo is not started with digit and you did not specify ammo format")
"Setting ammo_type 'uri' because ammo is not started with digit and you did not specify ammo format"
)
else:
self.log.info(
"Default ammo type ('phantom') used, use 'phantom.ammo_type' option to override it")
"Default ammo type ('phantom') used, use 'phantom.ammo_type' option to override it"
)
except StopIteration:
self.log.exception(
"Couldn't read first line of ammo file")
@ -110,9 +113,8 @@ class ComponentFactory():
else:
raise NotImplementedError(
'No such ammo type implemented: "%s"' % self.ammo_type)
ammo_gen = af_readers[self.ammo_type](self.ammo_file,
headers=self.headers,
http_ver=self.http_ver)
ammo_gen = af_readers[self.ammo_type](
self.ammo_file, headers=self.headers, http_ver=self.http_ver)
else:
raise StepperConfigurationError(
'Ammo not found. Specify uris or ammo file')
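
Note: the autodetection above distinguishes the two ammo flavours by the very beginning of the file: phantom-style ammo opens with a numeric chunk header, while uri-style ammo opens with a URI or a [Header: value] line. A rough sketch of that check (detect_ammo_type is an illustrative helper; the real code peeks at the reader's first missile):

phantom_ammo = '37 good\nGET / HTTP/1.0\r\nHost: example.org\r\n\r\n'
uri_ammo = '[Host: example.org]\n/ index_page\n/buy buy_page\n'

def detect_ammo_type(ammo_contents):
    # phantom chunk headers start with the chunk size, i.e. a digit
    return 'phantom' if ammo_contents[0].isdigit() else 'uri'

assert detect_ammo_type(phantom_ammo) == 'phantom'
assert detect_ammo_type(uri_ammo) == 'uri'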


@ -15,8 +15,9 @@ class Stpd(object):
self.af = ammo_factory
def __iter__(self):
return ("%s %s %s\n%s\n" % (len(missile), timestamp, marker, missile)
for timestamp, marker, missile in self.af)
return (
"%s %s %s\n%s\n" % (len(missile), timestamp, marker, missile)
for timestamp, marker, missile in self.af)
class StpdReader(object):
@ -36,6 +37,7 @@ class StpdReader(object):
return line # EOF
chunk_header = line.strip('\r\n')
return chunk_header
with open(self.filename, 'rb') as ammo_file:
chunk_header = read_chunk_header(ammo_file)
while chunk_header != '':
@ -47,12 +49,12 @@ class StpdReader(object):
missile = ammo_file.read(chunk_size)
if len(missile) < chunk_size:
raise StpdFileError(
"Unexpected end of file: read %s bytes instead of %s" %
(len(missile), chunk_size))
"Unexpected end of file: read %s bytes instead of %s"
% (len(missile), chunk_size))
yield (timestamp, missile, marker)
except (IndexError, ValueError) as e:
raise StpdFileError(
"Error while reading ammo file. Position: %s, header: '%s', original exception: %s" %
(ammo_file.tell(), chunk_header, e))
"Error while reading ammo file. Position: %s, header: '%s', original exception: %s"
% (ammo_file.tell(), chunk_header, e))
chunk_header = read_chunk_header(ammo_file)
self.log.info("Reached the end of stpd file")


@ -38,8 +38,8 @@ class StepperStatus(object):
def publish(self, key, value):
if key not in self.info:
raise RuntimeError("Tried to publish to a non-existent key: %s" %
key)
raise RuntimeError(
"Tried to publish to a non-existent key: %s" % key)
log.debug('Published %s to %s', value, key)
self.info[key] = value
@ -88,8 +88,8 @@ class StepperStatus(object):
self.info['loop_count'] = self._loop_count
for key in self.info:
if self.info[key] is None:
raise RuntimeError("Information for %s is not published yet." %
key)
raise RuntimeError(
"Information for %s is not published yet." % key)
return StepperInfo(**self.info)
def update_view(self):
@ -100,15 +100,16 @@ class StepperStatus(object):
self._timer = cur_time
if time_delta > 0:
stdout.write(
"AF: %3s%%, LP: %3s%%, loops: %10s, speed: %5s Krps\r" %
(self.af_progress, self.lp_progress, self.loop_count, int(
ammo_generated / time_delta / 1000.0)))
"AF: %3s%%, LP: %3s%%, loops: %10s, speed: %5s Krps\r" % (
self.af_progress, self.lp_progress, self.loop_count,
int(ammo_generated / time_delta / 1000.0)))
stdout.flush()
if self.core:
self.core.publish("stepper", "progress", self.lp_progress)
self.core.publish("stepper", "loop_count", self.loop_count)
self.core.publish("stepper", "speed", "%s Krps" %
int(ammo_generated / time_delta / 1000.0))
self.core.publish(
"stepper", "speed",
"%s Krps" % int(ammo_generated / time_delta / 1000.0))
def update_af_progress(self):
if self.af_size and self.loop_limit and self.af_position is not None:


@ -9,7 +9,6 @@ from builtins import range
class LoadPlanBuilder(object):
def __init__(self):
self.generators = []
self.steps = []
@ -33,15 +32,16 @@ class LoadPlanBuilder(object):
return self
def ramp(self, count, duration):
self.log.debug("Ramp %s instances in %sms from %sms" %
(count, duration, self.duration))
self.log.debug(
"Ramp %s instances in %sms from %sms" %
(count, duration, self.duration))
if count < 0:
raise StepperConfigurationError(
"Can not stop instances in instances_schedule.")
interval = float(duration) / (count - 1)
start_time = self.duration
self.generators.append(int(start_time + i * interval)
for i in range(0, count))
self.generators.append(
int(start_time + i * interval) for i in range(0, count))
self.steps += [(self.instances + i + 1, int(interval / 1000.0))
for i in range(0, count)]
self.instances += count
@ -58,8 +58,8 @@ class LoadPlanBuilder(object):
self.ramp(final_instances - initial_instances + 1, duration)
return self
def stairway(self, initial_instances, final_instances, step_size,
step_duration):
def stairway(
self, initial_instances, final_instances, step_size, step_duration):
step_count = (final_instances - initial_instances) // step_size
self.log.debug("Making a stairway: %s steps" % step_count)
self.start(initial_instances - self.instances)
@ -79,7 +79,8 @@ class LoadPlanBuilder(object):
self.ramp(int(instances), parse_duration(interval))
else:
self.log.info(
"Ramp step format: 'ramp(<instances_to_start>, <step_duration>)'")
"Ramp step format: 'ramp(<instances_to_start>, <step_duration>)'"
)
raise StepperConfigurationError(
"Error in step configuration: 'ramp(%s'" % params)
@ -91,7 +92,8 @@ class LoadPlanBuilder(object):
self.const(int(instances), parse_duration(interval))
else:
self.log.info(
"Const step format: 'const(<instances_count>, <step_duration>)'")
"Const step format: 'const(<instances_count>, <step_duration>)'"
)
raise StepperConfigurationError(
"Error in step configuration: 'const(%s'" % params)
@ -112,11 +114,12 @@ class LoadPlanBuilder(object):
if s_res:
initial_instances, final_instances, interval = s_res.groups()
self.line(
int(initial_instances), int(final_instances),
parse_duration(interval))
int(initial_instances),
int(final_instances), parse_duration(interval))
else:
self.log.info(
"Line step format: 'line(<initial_instances>, <final_instances>, <step_duration>)'")
"Line step format: 'line(<initial_instances>, <final_instances>, <step_duration>)'"
)
raise StepperConfigurationError(
"Error in step configuration: 'line(%s'" % params)
@ -139,11 +142,13 @@ class LoadPlanBuilder(object):
initial_instances, final_instances, step_size, step_duration = s_res.groups(
)
self.stairway(
int(initial_instances), int(final_instances),
int(initial_instances),
int(final_instances),
int(step_size), parse_duration(step_duration))
else:
self.log.info(
"Stairway step format: 'step(<initial_instances>, <final_instances>, <step_size>, <step_duration>)'")
"Stairway step format: 'step(<initial_instances>, <final_instances>, <step_size>, <step_duration>)'"
)
raise StepperConfigurationError(
"Error in step configuration: 'step(%s'" % params)


@ -21,8 +21,9 @@ class Const(object):
if self.rps == 0:
return iter([])
interval = 1000.0 / self.rps
return (int(i * interval)
for i in range(0, int(self.rps * self.duration / 1000)))
return (
int(i * interval)
for i in range(0, int(self.rps * self.duration / 1000)))
def rps_at(self, t):
'''Return rps for second t'''
@ -112,8 +113,8 @@ class Line(object):
:rtype: list
"""
seconds = range(0, int(self.duration) + 1)
rps_groups = groupby(
[proper_round(self.rps_at(t)) for t in seconds], lambda x: x)
rps_groups = groupby([proper_round(self.rps_at(t)) for t in seconds],
lambda x: x)
rps_list = [(rps, len(list(rpl))) for rps, rpl in rps_groups]
return rps_list
@ -140,12 +141,11 @@ class Composite(object):
return int(sum(step.__len__() for step in self.steps))
def get_rps_list(self):
return list(chain.from_iterable(step.get_rps_list()
for step in self.steps))
return list(
chain.from_iterable(step.get_rps_list() for step in self.steps))
class Stairway(Composite):
def __init__(self, minrps, maxrps, increment, step_duration):
if maxrps < minrps:
increment = -increment
@ -164,7 +164,6 @@ class Stairway(Composite):
class StepFactory(object):
@staticmethod
def line(params):
template = re.compile('([0-9.]+),\s*([0-9.]+),\s*([0-9.]+[dhms]?)+\)')
@ -183,8 +182,8 @@ class StepFactory(object):
'([0-9.]+),\s*([0-9.]+),\s*([0-9.]+),\s*([0-9.]+[dhms]?)+\)')
minrps, maxrps, increment, duration = template.search(params).groups()
return Stairway(
float(minrps), float(maxrps), float(increment),
parse_duration(duration))
float(minrps),
float(maxrps), float(increment), parse_duration(duration))
@staticmethod
def produce(step_config):
@ -198,8 +197,8 @@ class StepFactory(object):
if load_type in _plans:
return _plans[load_type](params)
else:
raise NotImplementedError('No such load type implemented: "%s"' %
load_type)
raise NotImplementedError(
'No such load type implemented: "%s"' % load_type)
def create(rps_schedule):
@ -207,8 +206,8 @@ def create(rps_schedule):
Create Load Plan as defined in schedule. Publish info about its duration.
"""
if len(rps_schedule) > 1:
lp = Composite([StepFactory.produce(step_config)
for step_config in rps_schedule])
lp = Composite(
[StepFactory.produce(step_config) for step_config in rps_schedule])
else:
lp = StepFactory.produce(rps_schedule[0])
info.status.publish('duration', lp.get_duration() / 1000)
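
Note: a usage sketch for create() above, with the expected timestamps taken from this commit's own tests (take is the test helper from yandextank.stepper.util; the load plan module path is assumed):

from yandextank.stepper.load_plan import create  # path assumed
from yandextank.stepper.util import take

# 'line(1, 5, 2s)' ramps from 1 to 5 rps over two seconds; the plan
# yields one timestamp (ms) per shot, six shots in total.
assert take(100, create(['line(1, 5, 2s)'])) == [0, 618, 1000, 1302, 1561, 1791]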


@ -39,18 +39,18 @@ class AmmoFactory(object):
configured ComponentFactory, passed as a parameter to the
__init__ method of this class.
'''
ammo_stream = (ammo
for ammo in ((missile, marker or self.marker(missile))
for missile, marker in self.ammo_generator)
if self.filter(ammo))
ammo_stream = (
ammo
for ammo in ((missile, marker or self.marker(missile))
for missile, marker in self.ammo_generator)
if self.filter(ammo))
return ((timestamp, marker or self.marker(missile), missile)
for timestamp, (missile, marker) in zip(self.load_plan,
ammo_stream))
for timestamp, (missile, marker
) in zip(self.load_plan, ammo_stream))
class Stepper(object):
def __init__(self, core, **kwargs):
info.status = info.StepperStatus()
info.status.core = core
@ -118,19 +118,25 @@ class StepperWrapper(object):
def get_option(self, option_ammofile, param2=None):
''' get_option wrapper'''
result = self.core.get_option(self.section, option_ammofile, param2)
self.log.debug("Option %s.%s = %s", self.section, option_ammofile,
result)
self.log.debug(
"Option %s.%s = %s", self.section, option_ammofile, result)
return result
@staticmethod
def get_available_options():
opts = [StepperWrapper.OPTION_AMMOFILE, StepperWrapper.OPTION_LOOP,
StepperWrapper.OPTION_SCHEDULE, StepperWrapper.OPTION_STPD,
StepperWrapper.OPTION_INSTANCES_LIMIT]
opts += ["instances_schedule", "uris", "headers", "header_http",
"autocases", "enum_ammo", "ammo_type", "ammo_limit"]
opts += ["use_caching", "cache_dir", "force_stepping", "file_cache",
"chosen_cases"]
opts = [
StepperWrapper.OPTION_AMMOFILE, StepperWrapper.OPTION_LOOP,
StepperWrapper.OPTION_SCHEDULE, StepperWrapper.OPTION_STPD,
StepperWrapper.OPTION_INSTANCES_LIMIT
]
opts += [
"instances_schedule", "uris", "headers", "header_http", "autocases",
"enum_ammo", "ammo_type", "ammo_limit"
]
opts += [
"use_caching", "cache_dir", "force_stepping", "file_cache",
"chosen_cases"
]
return opts
def read_config(self):
@ -150,12 +156,12 @@ class StepperWrapper(object):
steps.append(step.strip() + ')')
return steps
self.rps_schedule = make_steps(self.get_option(self.OPTION_SCHEDULE,
''))
self.instances_schedule = make_steps(self.get_option(
"instances_schedule", ''))
self.instances = int(self.get_option(self.OPTION_INSTANCES_LIMIT,
'1000'))
self.rps_schedule = make_steps(
self.get_option(self.OPTION_SCHEDULE, ''))
self.instances_schedule = make_steps(
self.get_option("instances_schedule", ''))
self.instances = int(
self.get_option(self.OPTION_INSTANCES_LIMIT, '1000'))
self.uris = self.get_option("uris", '').strip().split("\n")
while '' in self.uris:
self.uris.remove('')
@ -167,8 +173,8 @@ class StepperWrapper(object):
self.use_caching = int(self.get_option("use_caching", '1'))
self.file_cache = int(self.get_option('file_cache', '8192'))
cache_dir = self.core.get_option(self.section, "cache_dir",
self.core.artifacts_base_dir)
cache_dir = self.core.get_option(
self.section, "cache_dir", self.core.artifacts_base_dir)
self.cache_dir = os.path.expanduser(cache_dir)
self.force_stepping = int(self.get_option("force_stepping", '0'))
self.stpd = self.get_option(self.OPTION_STPD, "")
@ -209,7 +215,8 @@ class StepperWrapper(object):
instances=self.instances)
publish_info(stepper_info)
else:
if (self.force_stepping and
if (
self.force_stepping and
os.path.exists(self.__si_filename())):
os.remove(self.__si_filename())
self.__make_stpd_file()
@ -240,7 +247,8 @@ class StepperWrapper(object):
hashed_str += sep + str(self.ammo_limit) + sep + ';'.join(
self.rps_schedule) + sep + str(self.autocases)
hashed_str += sep + ";".join(self.uris) + sep + ";".join(
self.headers) + sep + self.http_ver + sep + ";".join(self.chosen_cases)
self.headers) + sep + self.http_ver + sep + ";".join(
self.chosen_cases)
hashed_str += sep + str(self.enum_ammo) + sep + str(self.ammo_type)
if self.instances_schedule:
hashed_str += sep + str(self.instances)


@ -12,8 +12,8 @@ param1=50&param2=0&param3=hello
def __mark_by_uri(missile):
return '_'.join(missile.split('\n', 1)[0].split(' ', 2)[1].split('?')[
0].split('/'))
return '_'.join(
missile.split('\n', 1)[0].split(' ', 2)[1].split('?')[0].split('/'))
class __UriMarker(object):
@ -29,15 +29,18 @@ class __UriMarker(object):
self.limit = limit
def __call__(self, missile):
return '_'.join(missile.split('\n', 1)[0].split(' ', 2)[1].split('?')[
0].split('/')[0:self.limit + 1])
return '_'.join(
missile.split('\n', 1)[0].split(' ', 2)[1].split('?')[0].split('/')[
0:self.limit + 1])
__markers = {'uniq': lambda m: uuid4().hex, 'uri': __mark_by_uri, }
__markers = {
'uniq': lambda m: uuid4().hex,
'uri': __mark_by_uri,
}
class __Enumerator(object):
def __init__(self, marker):
self.marker = marker
self.number = int(0)
@ -80,6 +83,7 @@ def get_marker(marker_type, enum_ammo=False):
if limit:
marker = __UriMarker(limit)
else:
def marker(m):
return ''
except ValueError:
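
Note: what the 'uri' marker above yields for a concrete request: the query string is cut off and the path components are joined with underscores (request is an illustrative missile, not taken from the codebase):

request = 'GET /some/page?param=1 HTTP/1.1\r\nHost: example.org\r\n\r\n'
marker = '_'.join(
    request.split('\n', 1)[0].split(' ', 2)[1].split('?')[0].split('/'))
assert marker == '_some_page'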


@ -46,8 +46,8 @@ class HttpAmmo(object):
headers = '\r\n'.join(self.headers) + '\r\n'
else:
headers = ''
return "%s %s %s\r\n%s\r\n%s" % (self.method, self.uri, self.proto,
headers, self.body)
return "%s %s %s\r\n%s\r\n%s" % (
self.method, self.uri, self.proto, headers, self.body)
class SimpleGenerator(object):
@ -78,10 +78,9 @@ class UriStyleGenerator(object):
uris - a list of URIs as strings.
'''
self.uri_count = len(uris)
self.missiles = cycle([(HttpAmmo(uri,
headers,
http_ver=http_ver).to_s(), None)
for uri in uris])
self.missiles = cycle([(
HttpAmmo(
uri, headers, http_ver=http_ver).to_s(), None) for uri in uris])
def __iter__(self):
for m in self.missiles:
@ -120,8 +119,8 @@ class AmmoFileReader(object):
if chunk_size == 0:
if info.status.loop_count == 0:
self.log.info(
'Zero-sized chunk in ammo file at %s. Starting over.' %
ammo_file.tell())
'Zero-sized chunk in ammo file at %s. Starting over.'
% ammo_file.tell())
ammo_file.seek(0)
info.status.inc_loop_count()
chunk_header = read_chunk_header(ammo_file)
@ -130,13 +129,13 @@ class AmmoFileReader(object):
missile = ammo_file.read(chunk_size)
if len(missile) < chunk_size:
raise AmmoFileError(
"Unexpected end of file: read %s bytes instead of %s" %
(len(missile), chunk_size))
"Unexpected end of file: read %s bytes instead of %s"
% (len(missile), chunk_size))
yield (missile, marker)
except (IndexError, ValueError) as e:
raise AmmoFileError(
"Error while reading ammo file. Position: %s, header: '%s', original exception: %s" %
(ammo_file.tell(), chunk_header, e))
"Error while reading ammo file. Position: %s, header: '%s', original exception: %s"
% (ammo_file.tell(), chunk_header, e))
chunk_header = read_chunk_header(ammo_file)
if chunk_header == '':
ammo_file.seek(0)
@ -242,12 +241,14 @@ class AccessLogReader(object):
method, uri, proto = request.split()
http_ver = proto.split('/')[1]
if method == "GET":
yield (HttpAmmo(uri,
headers=self.headers,
http_ver=http_ver, ).to_s(), None)
yield (
HttpAmmo(
uri,
headers=self.headers,
http_ver=http_ver, ).to_s(), None)
else:
self.warn("Skipped line: %s (unsupported method)" %
line)
self.warn(
"Skipped line: %s (unsupported method)" % line)
except (ValueError, IndexError) as e:
self.warn("Skipped line: %s (%s)" % (line, e))
ammo_file.seek(0)
@ -260,7 +261,6 @@ def _parse_header(header):
class UriReader(object):
def __init__(self, filename, headers=[], http_ver='1.1', **kwargs):
self.filename = filename
self.headers = {}
@ -278,8 +278,8 @@ class UriReader(object):
for line in ammo_file:
info.status.af_position = ammo_file.tell()
if line.startswith('['):
self.headers.update(_parse_header(line.strip(
'\r\n[]\t ')))
self.headers.update(
_parse_header(line.strip('\r\n[]\t ')))
elif len(line.rstrip('\r\n')):
fields = line.split()
uri = fields[0]
@ -287,13 +287,14 @@ class UriReader(object):
marker = fields[1]
else:
marker = None
yield (HttpAmmo(uri,
headers=[
': '.join(header)
for header in self.headers.items()
],
http_ver=self.http_ver, ).to_s(),
marker)
yield (
HttpAmmo(
uri,
headers=[
': '.join(header)
for header in self.headers.items()
],
http_ver=self.http_ver, ).to_s(), marker)
if info.status.ammo_count == 0:
self.log.error("No ammo in uri-style file")
raise AmmoFileError("No ammo! Cover me!")
@ -339,8 +340,8 @@ class UriPostReader(object):
chunk_size = int(fields[0])
if chunk_size == 0:
self.log.debug(
'Zero-sized chunk in ammo file at %s. Starting over.' %
ammo_file.tell())
'Zero-sized chunk in ammo file at %s. Starting over.'
% ammo_file.tell())
ammo_file.seek(0)
info.status.inc_loop_count()
chunk_header = read_chunk_header(ammo_file)
@ -350,21 +351,22 @@ class UriPostReader(object):
missile = ammo_file.read(chunk_size)
if len(missile) < chunk_size:
raise AmmoFileError(
"Unexpected end of file: read %s bytes instead of %s" %
(len(missile), chunk_size))
yield (HttpAmmo(uri=uri,
headers=[
': '.join(header)
for header in self.headers.items()
],
method='POST',
body=missile,
http_ver=self.http_ver, ).to_s(),
marker)
"Unexpected end of file: read %s bytes instead of %s"
% (len(missile), chunk_size))
yield (
HttpAmmo(
uri=uri,
headers=[
': '.join(header)
for header in self.headers.items()
],
method='POST',
body=missile,
http_ver=self.http_ver, ).to_s(), marker)
except (IndexError, ValueError) as e:
raise AmmoFileError(
"Error while reading ammo file. Position: %s, header: '%s', original exception: %s" %
(ammo_file.tell(), chunk_header, e))
"Error while reading ammo file. Position: %s, header: '%s', original exception: %s"
% (ammo_file.tell(), chunk_header, e))
chunk_header = read_chunk_header(ammo_file)
if chunk_header == '':
self.log.debug(


@ -5,25 +5,48 @@ from yandextank.stepper.util import take
class TestCreate(object):
@pytest.mark.parametrize('n, loadplan, expected', [
(7, LoadPlanBuilder().ramp(5, 4000).create(), [0, 1000, 2000, 3000, 4000, 0, 0]),
(7, create(['ramp(5, 4s)']), [0, 1000, 2000, 3000, 4000, 0, 0]),
(12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)']), [0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0]),
(7, create(['wait(5s)', 'ramp(5, 0)']), [5000, 5000, 5000, 5000, 5000, 0, 0]),
(7, create([]), [0, 0, 0, 0, 0, 0, 0]),
(12, create(['line(1, 9, 4s)']), [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0]),
(12, create(['const(3, 5s)', 'line(7, 11, 2s)']), [0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0]),
(12, create(['step(2, 10, 2, 3s)']), [0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0]),
(12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps, [(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)]),
(12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps, [(100, 30), (200, 30), (300, 30), (400, 30), (500, 30), (600, 30), (700, 30), (800, 30), (900, 30), (950, 30)])])
@pytest.mark.parametrize(
'n, loadplan, expected',
[(
7, LoadPlanBuilder().ramp(5, 4000).create(),
[0, 1000, 2000, 3000, 4000, 0, 0]
), (
7, create(['ramp(5, 4s)']),
[0, 1000, 2000, 3000, 4000, 0, 0]
), (
12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)']),
[0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0]
), (
7, create(['wait(5s)', 'ramp(5, 0)']),
[5000, 5000, 5000, 5000, 5000, 0, 0]
), (
7, create([]),
[0, 0, 0, 0, 0, 0, 0]
), (
12, create(['line(1, 9, 4s)']),
[0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0]
), (
12, create(['const(3, 5s)', 'line(7, 11, 2s)']),
[0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0]
), (
12, create(['step(2, 10, 2, 3s)']),
[0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0]
), (
12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps,
[(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)]
), (
12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps,
[
(100, 30), (200, 30), (300, 30), (400, 30), (500, 30),
(600, 30), (700, 30), (800, 30), (900, 30), (950, 30)]
)]) # yapf:disable
def test_steps(self, n, loadplan, expected):
assert take(n, loadplan) == expected
@pytest.mark.parametrize('loadplan, expected', [
(LoadPlanBuilder().stairway(100, 950, 100, 30000), 950),
(LoadPlanBuilder().const(3, 1000).line(5, 10, 5000), 10),
(LoadPlanBuilder().line(1, 100, 60000), 100)
])
@pytest.mark.parametrize(
'loadplan, expected',
[(LoadPlanBuilder().stairway(100, 950, 100, 30000), 950),
(LoadPlanBuilder().const(3, 1000).line(5, 10, 5000), 10),
(LoadPlanBuilder().line(1, 100, 60000), 100)])
def test_instances(self, loadplan, expected):
assert loadplan.instances == expected


@ -4,7 +4,6 @@ from yandextank.stepper.util import take
class TestLine(object):
def test_get_rps_list(self):
lp = create(["line(1, 100, 10s)"])
rps_list = lp.get_rps_list()
@ -12,23 +11,19 @@ class TestLine(object):
assert rps_list[-1][0] == 100
@pytest.mark.parametrize("rps, duration, rps_list", [
(100, 3000, [(100, 3)]),
(0, 3000, [(0, 3)]),
(100, 0, [(100, 0)])
])
@pytest.mark.parametrize(
"rps, duration, rps_list",
[(100, 3000, [(100, 3)]), (0, 3000, [(0, 3)]), (100, 0, [(100, 0)])])
class TestConst(object):
@pytest.mark.parametrize("check_point, expected", [
(lambda duration: 0, lambda rps: rps),
(lambda duration: duration / 2, lambda rps: rps),
(lambda duration: duration + 1, lambda rps: 0),
(lambda duration: -1, lambda rps: 0)
])
@pytest.mark.parametrize(
"check_point, expected",
[(lambda duration: 0, lambda rps: rps),
(lambda duration: duration / 2, lambda rps: rps),
(lambda duration: duration + 1, lambda rps: 0),
(lambda duration: -1, lambda rps: 0)])
def test_rps_at(self, rps, duration, rps_list, check_point, expected):
assert Const(
rps, duration).rps_at(
check_point(duration)) == expected(rps)
assert Const(rps,
duration).rps_at(check_point(duration)) == expected(rps)
def test_get_rps_list(self, rps, duration, rps_list):
assert Const(rps, duration).get_rps_list() == rps_list
@ -36,132 +31,118 @@ class TestConst(object):
class TestLineNew(object):
@pytest.mark.parametrize("min_rps, max_rps, duration, check_point, expected", [
(0, 10, 30 * 1000, 0, 0),
(0, 10, 30 * 1000, 10, 3),
(0, 10, 30 * 1000, 29, 10),
(9, 10, 30 * 1000, 1, 9),
(9, 10, 30 * 1000, 20, 10)
])
@pytest.mark.parametrize(
"min_rps, max_rps, duration, check_point, expected",
[(0, 10, 30 * 1000, 0, 0), (0, 10, 30 * 1000, 10, 3),
(0, 10, 30 * 1000, 29, 10), (9, 10, 30 * 1000, 1, 9),
(9, 10, 30 * 1000, 20, 10)])
def test_rps_at(self, min_rps, max_rps, duration, check_point, expected):
assert round(
Line(
min_rps,
max_rps,
duration).rps_at(check_point)) == expected
assert round(Line(min_rps, max_rps, duration).rps_at(
check_point)) == expected
@pytest.mark.parametrize("min_rps, max_rps, duration, check_point, expected", [
(0, 10, 20 * 1000, 9, (9, 2)),
(0, 10, 30 * 1000, 0, (0, 2)),
(0, 10, 30 * 1000, 5, (5, 3)),
(0, 10, 30 * 1000, 10, (10, 2)),
(0, 10, 3 * 1000, 0, (0, 1)),
(0, 10, 3 * 1000, 1, (3, 1)),
(0, 10, 3 * 1000, 2, (7, 1)),
(0, 10, 3 * 1000, 3, (10, 1)),
(9, 10, 30 * 1000, 0, (9, 15)),
(9, 10, 30 * 1000, 1, (10, 16)),
(10, 10, 30 * 1000, 0, (10, 31)), # strange
(10, 0, 30 * 1000, 0, (10, 2)),
(10, 0, 30 * 1000, 1, (9, 3)),
(10, 0, 30 * 1000, 9, (1, 3)),
(10, 0, 30 * 1000, 10, (0, 2)),
])
@pytest.mark.parametrize(
"min_rps, max_rps, duration, check_point, expected",
[
(0, 10, 20 * 1000, 9, (9, 2)),
(0, 10, 30 * 1000, 0, (0, 2)),
(0, 10, 30 * 1000, 5, (5, 3)),
(0, 10, 30 * 1000, 10, (10, 2)),
(0, 10, 3 * 1000, 0, (0, 1)),
(0, 10, 3 * 1000, 1, (3, 1)),
(0, 10, 3 * 1000, 2, (7, 1)),
(0, 10, 3 * 1000, 3, (10, 1)),
(9, 10, 30 * 1000, 0, (9, 15)),
(9, 10, 30 * 1000, 1, (10, 16)),
(10, 10, 30 * 1000, 0, (10, 31)), # strange
(10, 0, 30 * 1000, 0, (10, 2)),
(10, 0, 30 * 1000, 1, (9, 3)),
(10, 0, 30 * 1000, 9, (1, 3)),
(10, 0, 30 * 1000, 10, (0, 2)),
])
def test_get_rps_list(
self,
min_rps,
max_rps,
duration,
check_point,
expected):
assert Line(min_rps, max_rps, duration).get_rps_list()[
check_point] == expected
self, min_rps, max_rps, duration, check_point, expected):
assert Line(min_rps, max_rps,
duration).get_rps_list()[check_point] == expected
@pytest.mark.parametrize("min_rps, max_rps, duration, expected_len, threshold, len_above_threshold", [
(2, 12, 25000, 175, 5000, 160),
(2, 12, 25000, 175, 10000, 135),
(2, 12, 25000, 175, 15000, 100),
(2, 12, 25000, 175, 20000, 55),
(0, 10, 25000, 125, 15000, 80),
(10, 12, 20000, 220, 10000, 115),
(10, 10, 20000, 200, 10000, 100),
(10, 0, 25000, 125, 10000, 45),
(10, 0, 25000, 125, 15000, 20),
])
@pytest.mark.parametrize(
"min_rps, max_rps, duration, expected_len, threshold, len_above_threshold",
[
(2, 12, 25000, 175, 5000, 160),
(2, 12, 25000, 175, 10000, 135),
(2, 12, 25000, 175, 15000, 100),
(2, 12, 25000, 175, 20000, 55),
(0, 10, 25000, 125, 15000, 80),
(10, 12, 20000, 220, 10000, 115),
(10, 10, 20000, 200, 10000, 100),
(10, 0, 25000, 125, 10000, 45),
(10, 0, 25000, 125, 15000, 20),
])
def test_iter(
self,
min_rps,
max_rps,
duration,
expected_len,
threshold,
self, min_rps, max_rps, duration, expected_len, threshold,
len_above_threshold):
load_plan = Line(min_rps, max_rps, duration)
assert len(load_plan) == expected_len
assert len([ts for ts in load_plan if ts >= threshold]
) == len_above_threshold
assert len(
[ts for ts in load_plan if ts >= threshold]) == len_above_threshold
class TestComposite(object):
@pytest.mark.parametrize("steps, expected_len", [
([Line(0, 10, 20000), Const(10, 10000)], 200),
([Line(0, 10, 20000), Line(10, 0, 20000)], 200),
([Const(5, 10000), Const(10, 5000)], 100)
])
@pytest.mark.parametrize(
"steps, expected_len", [([Line(0, 10, 20000), Const(10, 10000)], 200),
([Line(0, 10, 20000), Line(10, 0, 20000)], 200),
([Const(5, 10000), Const(10, 5000)], 100)])
def test_iter(self, steps, expected_len):
assert len(Composite(steps)) == expected_len
@pytest.mark.parametrize("steps, check_point, expected", [
([Line(0, 10, 20000), Const(10, 10000)], 9, (9, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 10, (10, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 11, (10, 10)),
])
@pytest.mark.parametrize(
"steps, check_point, expected", [
([Line(0, 10, 20000), Const(10, 10000)], 9, (9, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 10, (10, 2)),
([Line(0, 10, 20000), Const(10, 10000)], 11, (10, 10)),
])
def test_rps_list(self, steps, check_point, expected):
assert Composite(steps).get_rps_list()[check_point] == expected
class TestStairway(object):
@pytest.mark.parametrize("min_rps, max_rps, increment, step_duration, expected_len, threshold, len_above_threshold",
[
(0, 1000, 50, 3000, 31500, 9000, 31050),
(0, 1000, 50, 3000, 31500, 15000, 30000),
(0, 1000, 50, 3000, 31500, 45000, 15750)
])
@pytest.mark.parametrize(
"min_rps, max_rps, increment, step_duration, expected_len, threshold, len_above_threshold",
[(0, 1000, 50, 3000, 31500, 9000, 31050),
(0, 1000, 50, 3000, 31500, 15000, 30000),
(0, 1000, 50, 3000, 31500, 45000, 15750)])
def test_iter(
self,
min_rps,
max_rps,
increment,
step_duration,
expected_len,
threshold,
len_above_threshold):
self, min_rps, max_rps, increment, step_duration, expected_len,
threshold, len_above_threshold):
load_plan = Stairway(min_rps, max_rps, increment, step_duration)
assert len(load_plan) == expected_len
assert len([ts for ts in load_plan if ts >= threshold]
) == len_above_threshold
assert len(
[ts for ts in load_plan if ts >= threshold]) == len_above_threshold
class TestCreate(object):
@pytest.mark.parametrize('rps_schedule, check_point, expected', [
(['line(1, 5, 2s)'], 100, [0, 618, 1000, 1302, 1561, 1791]),
(['line(1.1, 5.8, 2s)'], 100, [0, 566, 917, 1196, 1435, 1647]),
(['line(5, 1, 2s)'], 100, [0, 208, 438, 697, 1000, 1381]),
(['const(1, 10s)'], 100, [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]),
(['const(200, 0.1s)'], 100, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95]),
(['const(1, 2s)', 'const(2, 2s)'], 100, [0, 1000, 2000, 2500, 3000, 3500]),
(['const(1.5, 10s)'], 100, [0, 666, 1333, 2000, 2666, 3333, 4000, 4666, 5333, 6000, 6666, 7333, 8000, 8666, 9333]),
(['step(1, 5, 1, 5s)'], 10, [0, 1000, 2000, 3000, 4000, 5000, 5500, 6000, 6500, 7000]),
(['step(1.2, 5.7, 1.1, 5s)'], 10, [0, 833, 1666, 2500, 3333, 4166, 5000, 5434, 5869, 6304]),
(['const(1, 1)'], 10, [0]),
])
@pytest.mark.parametrize(
'rps_schedule, check_point, expected', [
(['line(1, 5, 2s)'], 100, [0, 618, 1000, 1302, 1561, 1791]),
(['line(1.1, 5.8, 2s)'], 100, [0, 566, 917, 1196, 1435, 1647]),
(['line(5, 1, 2s)'], 100, [0, 208, 438, 697, 1000, 1381]),
(['const(1, 10s)'], 100,
[0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]),
(['const(200, 0.1s)'], 100, [
0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75,
80, 85, 90, 95
]),
(['const(1, 2s)', 'const(2, 2s)'], 100,
[0, 1000, 2000, 2500, 3000, 3500]),
(['const(1.5, 10s)'], 100, [
0, 666, 1333, 2000, 2666, 3333, 4000, 4666, 5333, 6000, 6666,
7333, 8000, 8666, 9333
]),
(['step(1, 5, 1, 5s)'], 10,
[0, 1000, 2000, 3000, 4000, 5000, 5500, 6000, 6500, 7000]),
(['step(1.2, 5.7, 1.1, 5s)'], 10,
[0, 833, 1666, 2500, 3333, 4166, 5000, 5434, 5869, 6304]),
(['const(1, 1)'], 10, [0]),
])
def test_create(self, rps_schedule, check_point, expected):
# pytest.set_trace()
assert take(check_point, (create(rps_schedule))) == expected


@ -31,7 +31,12 @@ def parse_duration(duration):
_re_token = re.compile("([0-9.]+)([dhms]?)")
def parse_token(time, multiplier):
multipliers = {'d': 86400, 'h': 3600, 'm': 60, 's': 1, }
multipliers = {
'd': 86400,
'h': 3600,
'm': 60,
's': 1,
}
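# Each unit converts to seconds via the table above and the result
# is returned in milliseconds, e.g. parse_token('2', 'm') yields
# int(2 * 60 * 1000) == 120000.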
if multiplier:
if multiplier in multipliers:
return int(float(time) * multipliers[multiplier] * 1000)