Commit a42a2f41 authored by Camillo Bruni, committed by V8 LUCI CQ

[tools] Add variants support for run_perf.py

We usually run benchmarks in multiple variants: default, future, noopt.
This is currently only achieved by copying the run-perf JSON file and
changing the flags at the top level (or by copying whole subsections).

Using "variants" we can duplicate the tests at the current level with
different values and easily create benchmarks that differ only in v8
flags.
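
For illustration, here is a rough sketch of what a "variants" entry amounts to. This is not the actual run_perf.py implementation, and the helper name expand_variants is made up: each variant acts like an intermediate sub-suite that inherits the node's tests, so flags accumulate from parent to variant to test, and the variant name becomes one level of the graph path (e.g. ['test', 'VariantA', 'Richards']).

```python
import copy

# Illustrative sketch only, not the actual run_perf.py code: a config node
# with "variants" behaves as if each variant were a sub-suite that inherits
# the node's tests. Flags then accumulate parent -> variant -> test and the
# variant name becomes one level of the graph path.
def expand_variants(node):
  variants = node.pop('variants', None)
  if variants:
    node['tests'] = [
        dict(variant, tests=copy.deepcopy(node.get('tests', [])))
        for variant in variants
    ]
  return node
```

Applied to the V8_VARIANTS_JSON config used by the new test below, this corresponds to the three runs the test expects: d7 --flag run.js, d7 --flag --variant-a-flag run.js, and d7 --flag --variant-b-flag run.js.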

Drive-by fixes:
- Add Node.__iter__ and log the whole config graph in debug mode
- Add GraphConfig.__str__ method for better debugging
- Rename TraceConfig to LeafTraceConfig
- Rename RunnableTraceConfig to RunnableLeafTraceConfig
- Make --filter accept a regexp to better filter out variants
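
As a hedged illustration of the --filter change (the exact matching rules are implemented in run_perf.py and are not shown in this excerpt), a regexp makes it easy to pick out a single variant from the expanded graph paths:

```python
import re

# Illustration only: with variants expanded, graph paths contain the variant
# name, so a regexp such as 'VariantA' selects just that variant's traces.
graph_paths = [
    'test/default/Richards', 'test/default/DeltaBlue',
    'test/VariantA/Richards', 'test/VariantA/DeltaBlue',
    'test/VariantB/Richards', 'test/VariantB/DeltaBlue',
]
variant_a_only = [p for p in graph_paths if re.search(r'VariantA', p)]
# ['test/VariantA/Richards', 'test/VariantA/DeltaBlue']
```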

Bug: v8:12821, v8:11113
Change-Id: I56a2ba2dd24da15c7757406e9961746219cd8061
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3596128
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Tamer Tas <tmrts@chromium.org>
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80307}
parent 0e9a55d2
@@ -42,6 +42,35 @@ V8_JSON = {
]
}
V8_VARIANTS_JSON = {
'path': ['.'],
'owners': ['username@chromium.org'],
'binary': 'd7',
'timeout': 60,
'flags': ['--flag'],
'main': 'run.js',
'run_count': 1,
'results_regexp': '%s: (.+)$',
'variants': [{
'name': 'default',
'flags': [],
}, {
'name': 'VariantA',
'flags': ['--variant-a-flag'],
}, {
'name': 'VariantB',
'flags': ['--variant-b-flag'],
}],
'tests': [
{
'name': 'Richards',
},
{
'name': 'DeltaBlue',
},
]
}
V8_NESTED_SUITES_JSON = {
'path': ['.'],
'owners': ['username@chromium.org'],
@@ -138,14 +167,17 @@ class PerfTest(unittest.TestCase):
with open(self._test_input, 'w') as f:
f.write(json.dumps(json_content))
def _MockCommand(self, *args, **kwargs):
def _MockCommand(self, raw_dirs, raw_outputs, *args, **kwargs):
on_bots = kwargs.pop('on_bots', False)
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
timed_out=kwargs.get('timed_out', False),
exit_code=kwargs.get('exit_code', 0),
duration=42)
for arg in args[1]]
test_outputs = [
Output(
stdout=output,
timed_out=kwargs.get('timed_out', False),
exit_code=kwargs.get('exit_code', 0),
duration=42) for output in raw_outputs
]
def create_cmd(*args, **kwargs):
cmd = mock.MagicMock()
def execute(*args, **kwargs):
@@ -168,9 +200,16 @@ class PerfTest(unittest.TestCase):
mock.MagicMock(side_effect=return_values)).start()
# Check that d8 is called from the correct cwd for each test run.
dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
self.assertEqual(dirs.pop(), args[0])
dirs = [os.path.join(TEST_WORKSPACE, dir) for dir in raw_dirs]
def chdir(dir, *args, **kwargs):
if not dirs:
raise Exception("Missing test chdir '%s'" % dir)
expected_dir = dirs.pop()
self.assertEqual(
expected_dir, dir,
"Unexpected chdir: expected='%s' got='%s'" % (expected_dir, dir))
os.chdir = mock.MagicMock(side_effect=chdir)
subprocess.check_call = mock.MagicMock()
@@ -190,6 +229,12 @@ class PerfTest(unittest.TestCase):
with open(file_name or self._test_output) as f:
return json.load(f)
def _VerifyResultTraces(self, traces, file_name=None):
sorted_expected = sorted(traces, key=SORT_KEY)
sorted_results = sorted(
self._LoadResults(file_name)['traces'], key=SORT_KEY)
self.assertListEqual(sorted_expected, sorted_results)
def _VerifyResults(self, suite, units, traces, file_name=None):
self.assertListEqual(sorted([
{'units': units,
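
SORT_KEY is defined elsewhere in the test file and is not part of this diff; sorting both the expected and the actual traces with the same key makes _VerifyResultTraces independent of the order in which the variant runs are reported. A plausible definition, stated here purely as an assumption, would key on the graph path:

```python
# Assumption, not shown in this diff: order traces by their graph path so
# expected and actual lists can be compared irrespective of run order.
SORT_KEY = lambda trace: trace['graphs']
```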
@@ -244,6 +289,60 @@ class PerfTest(unittest.TestCase):
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testOneRunVariants(self):
self._WriteTestInput(V8_VARIANTS_JSON)
self._MockCommand(['.', '.', '.'], [
'x\nRichards: 3.3\nDeltaBlue: 3000\ny\n',
'x\nRichards: 2.2\nDeltaBlue: 2000\ny\n',
'x\nRichards: 1.1\nDeltaBlue: 1000\ny\n'
])
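    # Judging by the expected traces below, the mocked outputs are consumed
    # last-to-first: the final entry (Richards: 1.1, DeltaBlue: 1000) maps to
    # the first run, i.e. the 'default' variant.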
self.assertEqual(0, self._CallMain())
self._VerifyResultTraces([
{
'units': 'score',
'graphs': ['test', 'default', 'Richards'],
'results': [1.1],
'stddev': ''
},
{
'units': 'score',
'graphs': ['test', 'default', 'DeltaBlue'],
'results': [1000],
'stddev': ''
},
{
'units': 'score',
'graphs': ['test', 'VariantA', 'Richards'],
'results': [2.2],
'stddev': ''
},
{
'units': 'score',
'graphs': ['test', 'VariantA', 'DeltaBlue'],
'results': [2000],
'stddev': ''
},
{
'units': 'score',
'graphs': ['test', 'VariantB', 'Richards'],
'results': [3.3],
'stddev': ''
},
{
'units': 'score',
'graphs': ['test', 'VariantB', 'DeltaBlue'],
'results': [3000],
'stddev': ''
},
])
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
(os.path.join('out', 'x64.release',
'd7'), '--flag', '--variant-a-flag', 'run.js'),
(os.path.join('out', 'x64.release',
'd7'), '--flag', '--variant-b-flag', 'run.js'))
def testOneRunWithTestFlags(self):
test_input = dict(V8_JSON)
test_input['test_flags'] = ['2', 'test_name']
@@ -378,6 +477,7 @@ class PerfTest(unittest.TestCase):
(os.path.join('out', 'x64.release', 'd8'),
'--flag', '--flag2', 'run.js'))
def testOneRunStdDevRegExp(self):
test_input = dict(V8_JSON)
test_input['stddev_regexp'] = r'^%s-stddev: (.+)$'