Commit f8374ee4 authored by Sergiy Belozorov, committed by Commit Bot

[tools] Export runtime duration and timeout for each runnable

R=machenbach@chromium.org, tmrts@chromium.org

Bug: chromium:841700
Change-Id: I9852ccc573eda6b1bab7a7db295f7fd6e7410581
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1570010
Commit-Queue: Sergiy Belozorov <sergiyb@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60967}
parent e0e36ba0
......@@ -159,8 +159,12 @@ class Results(object):
def __init__(self, traces=None, errors=None):
    """Initialize an empty result set.

    Args:
      traces: Optional list of result traces; defaults to an empty list.
      errors: Optional list of error messages; defaults to an empty list.
    """
    self.traces = traces if traces else []
    self.errors = errors if errors else []
    # TODO(sergiyb): Deprecate self.timeouts/near_timeouts and compute them in
    # the recipe based on self.runnable_durations. Also cleanup RunnableConfig
    # by removing has_timeouts/has_near_timeouts there.
    self.timeouts = []
    # Runnables whose duration exceeded 90% of their allowed maximum runtime.
    self.near_timeouts = []
    self.runnable_durations = []
def ToDict(self):
return {
......@@ -168,6 +172,7 @@ class Results(object):
'errors': self.errors,
'timeouts': self.timeouts,
'near_timeouts': self.near_timeouts,
'runnable_durations': self.runnable_durations,
}
def WriteToFile(self, file_name):
......@@ -179,6 +184,7 @@ class Results(object):
self.errors += other.errors
self.timeouts += other.timeouts
self.near_timeouts += other.near_timeouts
self.runnable_durations += other.runnable_durations
return self
def __str__(self): # pragma: no cover
......@@ -596,7 +602,7 @@ class Platform(object):
def _Run(self, runnable, count, secondary=False):
    # Abstract hook: platform subclasses (e.g. the desktop platform seen
    # elsewhere in this file) override this to execute one benchmark run and
    # return its output. `secondary` selects the secondary shell binary.
    raise NotImplementedError()  # pragma: no cover
def _TimedRun(self, runnable, count, secondary=False):
def _LoggedRun(self, runnable, count, secondary=False):
suffix = ' - secondary' if secondary else ''
title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
try:
......@@ -629,9 +635,9 @@ class Platform(object):
Returns: A tuple with the two benchmark outputs. The latter will be None if
options.shell_dir_secondary was not specified.
"""
output = self._TimedRun(runnable, count, secondary=False)
output = self._LoggedRun(runnable, count, secondary=False)
if self.shell_dir_secondary:
return output, self._TimedRun(runnable, count, secondary=True)
return output, self._LoggedRun(runnable, count, secondary=True)
else:
return output, NULL_OUTPUT
......@@ -669,9 +675,7 @@ class DesktopPlatform(Platform):
node.ChangeCWD(path)
def _Run(self, runnable, count, secondary=False):
suffix = ' - secondary' if secondary else ''
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
output = cmd.execute()
......@@ -1034,6 +1038,8 @@ def Main(args):
runnable_name + '/' != options.filter):
continue
logging.info('>>> Running suite: %s', runnable_name)
durations = []
durations_secondary = []
def Runner():
"""Output generator that reruns several times."""
......@@ -1043,6 +1049,9 @@ def Main(args):
while attempts_left:
output, output_secondary = platform.Run(runnable, i)
if output.IsSuccess() and output_secondary.IsSuccess():
durations.append(output.duration)
if output_secondary is not NULL_OUTPUT:
durations_secondary.append(output_secondary.duration)
yield output, output_secondary
break
attempts_left -= 1
......@@ -1060,6 +1069,18 @@ def Main(args):
results.timeouts.append(runnable_name)
if runnable.has_near_timeouts:
results.near_timeouts.append(runnable_name)
results.runnable_durations.append({
'graphs': runnable.graphs,
'durations': durations,
'timeout': runnable.timeout,
})
if durations_secondary:
results_secondary.runnable_durations.append({
'graphs': runnable.graphs,
'durations': durations_secondary,
'timeout': runnable.timeout,
})
platform.PostExecution()
if options.json_test_results:
......
......@@ -31,6 +31,7 @@ V8_JSON = {
'path': ['.'],
'owners': ['username@chromium.org'],
'binary': 'd7',
'timeout': 60,
'flags': ['--flag'],
'main': 'run.js',
'run_count': 1,
......@@ -126,7 +127,8 @@ class PerfTest(unittest.TestCase):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
timed_out=kwargs.get('timed_out', False),
exit_code=kwargs.get('exit_code', 0))
exit_code=kwargs.get('exit_code', 0),
duration=42)
for arg in args[1]]
def create_cmd(*args, **kwargs):
cmd = mock.MagicMock()
......@@ -170,6 +172,13 @@ class PerfTest(unittest.TestCase):
'stddev': trace['stddev']} for trace in traces],
self._LoadResults(file_name)['traces'])
def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
    """Assert the recorded runnable_durations match the expected run count.

    Args:
      runs: Number of benchmark runs; each mocked run reports a duration of 42.
      timeout: Expected per-runnable timeout exported in the results.
      file_name: Optional results file to load; defaults to the primary one.
    """
    expected = [{
        'graphs': ['test'],
        'durations': [42] * runs,
        'timeout': timeout,
    }]
    actual = self._LoadResults(file_name)['runnable_durations']
    self.assertEquals(expected, actual)
def _VerifyErrors(self, errors):
    """Assert that the errors stored in the results file equal `errors`."""
    loaded = self._LoadResults()
    self.assertEquals(errors, loaded['errors'])
......@@ -200,6 +209,7 @@ class PerfTest(unittest.TestCase):
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyRunnableDurations(1, 60)
self._VerifyErrors([])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
......@@ -460,6 +470,7 @@ class PerfTest(unittest.TestCase):
{'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['200.0', '200.0'], 'stddev': ''},
], test_output_secondary)
self._VerifyRunnableDurations(2, 60, test_output_secondary)
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment