Commit 631c4d4f authored by Sergiy Belozorov, committed by Commit Bot

[tools] Refactor Results class into ResultTracker class based on dicts

This is part of the refactoring needed to implement a feature that allows
re-running benchmarks until the required confidence level is reached.

R=machenbach@chromium.org

No-Try: true
No-Tree-Checks: true
Bug: chromium:880724
Change-Id: I45f584a3503ecf567f4c2661a302a74fc5e516af
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1581605
Commit-Queue: Sergiy Belozorov <sergiyb@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61037}
parent 5d0cf6bc
......@@ -153,41 +153,93 @@ def GeometricMean(values):
return str(math.exp(sum(map(math.log, values)) / len(values)))
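Aside: the expression above is the log-domain form of the geometric mean; exp of the mean of the logs equals the n-th root of the product, which avoids overflow for large values. A minimal standalone sketch of the same identity, assuming numeric inputs (the script itself passes measured results around as strings):

import math

def geometric_mean(values):
  # exp(mean(log x)) == (x1 * x2 * ... * xn) ** (1.0 / n)
  values = list(values)
  return math.exp(sum(math.log(v) for v in values) / len(values))

# Example: the geometric mean of 1, 4 and 16 is 4.
assert abs(geometric_mean([1.0, 4.0, 16.0]) - 4.0) < 1e-9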
class Results(object):
"""Place holder for result traces."""
def __init__(self, traces=None, errors=None):
self.traces = traces or []
self.errors = errors or []
class ResultTracker(object):
"""Class that tracks trace/runnable results and produces script output.
The output is structured like this:
{
"traces": [
{
"graphs": ["path", "to", "trace", "config"],
"units": <string describing units, e.g. "ms" or "KB">,
"results": [<list of values measured over several runs>],
"stddev": <stddev of the value if measure by script or ''>
},
...
],
"runnables": [
{
"graphs": ["path", "to", "runnable", "config"],
"durations": [<list of durations of each runnable run in seconds>],
"timeout": <timeout configured for runnable in seconds>,
},
...
],
"errors": [<list of strings describing errors>],
# These two fields are deprecated and will soon be removed.
"timeouts": [<list of traces which have timed out at least once>],
"near_timeouts": [<list of traces which have at least once run for longer
than 90% of the configured timeout>],
}
"""
def __init__(self):
self.traces = {}
self.errors = []
# TODO(sergiyb): Deprecate self.timeouts/near_timeouts and compute them in
# the recipe based on self.runnable_durations. Also cleanup RunnableConfig
# by removing has_timeouts/has_near_timeouts there.
self.timeouts = []
self.near_timeouts = [] # > 90% of the max runtime
self.runnable_durations = []
self.runnables = {}
def AddTraceResults(self, trace, results, stddev):
if trace.name not in self.traces:
self.traces[trace.name] = {
'graphs': trace.graphs,
'units': trace.units,
'results': results,
'stddev': stddev or '',
}
else:
existing_entry = self.traces[trace.name]
assert trace.graphs == existing_entry['graphs']
assert trace.units == existing_entry['units']
assert not (stddev and existing_entry['stddev'])
existing_entry['stddev'] = stddev
existing_entry['results'].extend(results)
def AddErrors(self, errors):
self.errors.extend(errors)
def AddRunnableDurations(self, runnable, durations):
"""Adds a list of durations of the different runs of the runnable."""
if runnable.name not in self.runnables:
self.runnables[runnable.name] = {
'graphs': runnable.graphs,
'durations': durations,
'timeout': runnable.timeout,
}
else:
existing_entry = self.runnables[runnable.name]
assert runnable.timeout == existing_entry['timeout']
assert runnable.graphs == existing_entry['graphs']
existing_entry['durations'].extend(durations)
def ToDict(self):
return {
'traces': self.traces,
'traces': self.traces.values(),
'errors': self.errors,
'timeouts': self.timeouts,
'near_timeouts': self.near_timeouts,
'runnable_durations': self.runnable_durations,
'runnables': self.runnables.values(),
}
def WriteToFile(self, file_name):
with open(file_name, 'w') as f:
f.write(json.dumps(self.ToDict()))
def __add__(self, other):
self.traces += other.traces
self.errors += other.errors
self.timeouts += other.timeouts
self.near_timeouts += other.near_timeouts
self.runnable_durations += other.runnable_durations
return self
def __str__(self): # pragma: no cover
return str(self.ToDict())
return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
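To make the new bookkeeping concrete, here is a rough usage sketch of ResultTracker in isolation. It assumes run_perf.py is importable (as it is in the unit tests further below); the Trace namedtuple is a hypothetical stand-in exposing only the attributes that AddTraceResults reads from a real TraceConfig:

import collections
import run_perf

# Hypothetical stand-in for a TraceConfig leaf; only name/graphs/units are needed.
Trace = collections.namedtuple('Trace', ['name', 'graphs', 'units'])

tracker = run_perf.ResultTracker()
richards = Trace(name='test/Richards', graphs=['test', 'Richards'], units='score')

# Results from two reruns of the same trace are merged into one entry.
tracker.AddTraceResults(richards, ['1.234'], '')
tracker.AddTraceResults(richards, ['1.301'], '')

print(list(tracker.ToDict()['traces']))
# [{'graphs': ['test', 'Richards'], 'units': 'score',
#   'results': ['1.234', '1.301'], 'stddev': ''}]  (key order may vary)

AddRunnableDurations follows the same merge-by-name pattern for the 'runnables' section of the output.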
class Measurement(object):
......@@ -196,10 +248,8 @@ class Measurement(object):
The results are from repetitive runs of the same executable. They are
gathered by repeated calls to ConsumeOutput.
"""
def __init__(self, graphs, units, results_regexp, stddev_regexp):
self.name = '/'.join(graphs)
self.graphs = graphs
self.units = units
def __init__(self, trace, results_regexp, stddev_regexp):
self.trace = trace
self.results_regexp = results_regexp
self.stddev_regexp = stddev_regexp
self.results = []
......@@ -212,29 +262,28 @@ class Measurement(object):
self.results.append(str(float(result)))
except ValueError:
self.errors.append('Regexp "%s" returned a non-numeric for test %s.'
% (self.results_regexp, self.name))
% (self.results_regexp, self.trace.name))
except:
self.errors.append('Regexp "%s" did not match for test %s.'
% (self.results_regexp, self.name))
% (self.results_regexp, self.trace.name))
try:
if self.stddev_regexp and self.stddev:
self.errors.append('Test %s should only run once since a stddev '
'is provided by the test.' % self.name)
'is provided by the test.' % self.trace.name)
if self.stddev_regexp:
self.stddev = re.search(
self.stddev_regexp, output.stdout, re.M).group(1)
except:
self.errors.append('Regexp "%s" did not match for test %s.'
% (self.stddev_regexp, self.name))
% (self.stddev_regexp, self.trace.name))
def UpdateResults(self, result_tracker):
result_tracker.AddTraceResults(self.trace, self.results, self.stddev)
result_tracker.AddErrors(self.errors)
def GetResults(self):
return Results([{
'graphs': self.graphs,
'units': self.units,
'results': self.results,
'stddev': self.stddev,
}], self.errors)
return self.results
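For context, this sketch traces the path from raw benchmark stdout through Measurement into the tracker. The Output namedtuple and the trace stand-in are simplified placeholders for the script's real classes; the regexp mirrors the '^Richards: (.+)$' pattern used in the unit tests:

import collections
import run_perf

Output = collections.namedtuple('Output', ['stdout'])
Trace = collections.namedtuple('Trace', ['name', 'graphs', 'units'])

trace = Trace('test/Richards', ['test', 'Richards'], 'score')
measurement = run_perf.Measurement(trace, r'^Richards: (.+)$', None)

# One ConsumeOutput call per benchmark rerun.
measurement.ConsumeOutput(Output(stdout='Richards: 1.234\n'))
measurement.ConsumeOutput(Output(stdout='Richards: 1.301\n'))

tracker = run_perf.ResultTracker()
measurement.UpdateResults(tracker)  # pushes parsed results and errors into the tracker
print(list(tracker.ToDict()['traces']))  # one merged trace entry with two results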
class NullMeasurement(object):
......@@ -244,8 +293,11 @@ class NullMeasurement(object):
def ConsumeOutput(self, output):
pass
def UpdateResults(self, result_tracker):
pass
def GetResults(self):
return Results()
return []
def Unzip(iterable):
......@@ -277,52 +329,49 @@ def RunResultsProcessor(results_processor, output, count):
def AccumulateResults(
graph_names, trace_configs, output_iter, perform_measurement, calc_total):
graph, output_iter, perform_measurement, calc_total, result_tracker):
"""Iterates over the output of multiple benchmark reruns and accumulates
results for a configured list of traces.
Args:
graph_names: List of names that configure the base path of the traces. E.g.
['v8', 'Octane'].
trace_configs: List of 'TraceConfig' instances. Each trace config defines
how to perform a measurement.
graph: Parent GraphConfig for which results are to be accumulated.
output_iter: Iterator over the output of each test run.
perform_measurement: Whether to actually run tests and perform measurements.
This is needed so that we reuse this script for both CI
and trybot, but want to ignore second run on CI without
having to spread this logic throughout the script.
calc_total: Boolean flag to specify the calculation of a summary trace.
Returns: A 'Results' object.
result_tracker: ResultTracker object to be updated.
"""
measurements = [
trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
measurements = [trace.CreateMeasurement(perform_measurement)
for trace in graph.children]
for output in output_iter():
for measurement in measurements:
measurement.ConsumeOutput(output)
res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())
for measurement in measurements:
measurement.UpdateResults(result_tracker)
if not res.traces or not calc_total:
return res
raw_results = [m.GetResults() for m in measurements]
if not raw_results or not calc_total:
return
# Assume all traces have the same structure.
if len(set(map(lambda t: len(t['results']), res.traces))) != 1:
res.errors.append('Not all traces have the same number of results.')
return res
if len(set(map(len, raw_results))) != 1:
result_tracker.AddErrors(
['Not all traces have the same number of results. Can not compute '
'total for %s' % graph.name])
return
# Calculate the geometric means for all traces. Above we made sure that
# there is at least one trace and that the number of results is the same
# for each trace.
n_results = len(res.traces[0]['results'])
total_results = [GeometricMean(t['results'][i] for t in res.traces)
n_results = len(raw_results[0])
total_results = [GeometricMean(r[i] for r in raw_results)
for i in range(0, n_results)]
res.traces.append({
'graphs': graph_names + ['Total'],
'units': res.traces[0]['units'],
'results': total_results,
'stddev': '',
})
return res
total_trace = TraceConfig(
{'name': 'Total', 'units': graph.children[0].units}, graph, graph.arch)
result_tracker.AddTraceResults(total_trace, total_results, '')
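The 'Total' summary computed above takes, for each run index, the geometric mean across all traces, so the synthetic trace ends up with exactly as many results as each individual trace. A simplified, self-contained sketch of that column-wise aggregation, with a local re-implementation of the geometric mean and made-up sample values:

import math

def geo_mean(values):
  # Local stand-in mirroring GeometricMean above, numeric in and out.
  values = list(values)
  return math.exp(sum(map(math.log, values)) / len(values))

# Two traces (rows), each measured over two reruns (columns).
raw_results = [
    [100.0, 50.0],   # e.g. Richards
    [200.0, 300.0],  # e.g. DeltaBlue
]

n_results = len(raw_results[0])
total_results = [geo_mean(r[i] for r in raw_results) for i in range(n_results)]
print(total_results)  # one aggregated value per run: roughly [141.42, 122.47]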
class Node(object):
......@@ -333,6 +382,10 @@ class Node(object):
def AppendChild(self, child):
self._children.append(child)
@property
def children(self):
return self._children
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
......@@ -364,6 +417,7 @@ class GraphConfig(Node):
def __init__(self, suite, parent, arch):
super(GraphConfig, self).__init__()
self._suite = suite
self.arch = arch
assert isinstance(suite.get('path', []), list)
assert isinstance(suite.get('owners', []), list)
......@@ -414,6 +468,10 @@ class GraphConfig(Node):
stddev_default = None
self.stddev_regexp = suite.get('stddev_regexp', stddev_default)
@property
def name(self):
return '/'.join(self.graphs)
class TraceConfig(GraphConfig):
"""Represents a leaf in the suite tree structure."""
......@@ -426,12 +484,7 @@ class TraceConfig(GraphConfig):
if not perform_measurement:
return NullMeasurement()
return Measurement(
self.graphs,
self.units,
self.results_regexp,
self.stddev_regexp,
)
return Measurement(self, self.results_regexp, self.stddev_regexp)
class RunnableConfig(GraphConfig):
......@@ -486,24 +539,22 @@ class RunnableConfig(GraphConfig):
args=self.GetCommandFlags(extra_flags=extra_flags),
timeout=self.timeout or 60)
def Run(self, runner, trybot):
def Run(self, runner, secondary, result_tracker, results_secondary):
"""Iterates over several runs and handles the output for all traces."""
output, output_secondary = Unzip(runner())
return (
AccumulateResults(
self.graphs,
self._children,
self,
output_iter=self.PostProcess(output),
perform_measurement=True,
calc_total=self.total,
),
result_tracker=result_tracker,
)
AccumulateResults(
self.graphs,
self._children,
self,
output_iter=self.PostProcess(output_secondary),
perform_measurement=trybot, # only run second time on trybots
perform_measurement=secondary, # only run second time on trybots
calc_total=self.total,
),
result_tracker=results_secondary,
)
......@@ -512,17 +563,16 @@ class RunnableTraceConfig(TraceConfig, RunnableConfig):
def __init__(self, suite, parent, arch):
super(RunnableTraceConfig, self).__init__(suite, parent, arch)
def Run(self, runner, trybot):
def Run(self, runner, secondary, result_tracker, results_secondary):
"""Iterates over several runs and handles the output."""
measurement = self.CreateMeasurement(perform_measurement=True)
measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
measurement_secondary = self.CreateMeasurement(
perform_measurement=secondary)
for output, output_secondary in runner():
measurement.ConsumeOutput(output)
measurement_secondary.ConsumeOutput(output_secondary)
return (
measurement.GetResults(),
measurement_secondary.GetResults(),
)
measurement.UpdateResults(result_tracker)
measurement_secondary.UpdateResults(results_secondary)
def MakeGraphConfig(suite, arch, parent):
......@@ -998,15 +1048,16 @@ def Main(argv):
prev_cpu_gov = None
platform = Platform.GetPlatform(args)
results = Results()
results_secondary = Results()
result_tracker = ResultTracker()
result_tracker_secondary = ResultTracker()
# We use list here to allow modification in nested function below.
have_failed_tests = [False]
with CustomMachineConfiguration(governor = args.cpu_governor,
disable_aslr = args.noaslr) as conf:
for path in args.suite:
if not os.path.exists(path): # pragma: no cover
results.errors.append('Configuration file %s does not exist.' % path)
result_tracker.AddErrors([
'Configuration file %s does not exist.' % path])
continue
with open(path) as f:
......@@ -1055,39 +1106,31 @@ def Main(argv):
logging.info('>>> Retrying suite: %s', runnable_name)
# Let runnable iterate over all runs and handle output.
result, result_secondary = runnable.Run(
Runner, trybot=args.shell_dir_secondary)
results += result
results_secondary += result_secondary
runnable.Run(Runner, args.shell_dir_secondary, result_tracker,
result_tracker_secondary)
if runnable.has_timeouts:
results.timeouts.append(runnable_name)
result_tracker.timeouts.append(runnable_name)
if runnable.has_near_timeouts:
results.near_timeouts.append(runnable_name)
results.runnable_durations.append({
'graphs': runnable.graphs,
'durations': durations,
'timeout': runnable.timeout,
})
result_tracker.near_timeouts.append(runnable_name)
result_tracker.AddRunnableDurations(runnable, durations)
if durations_secondary:
results_secondary.runnable_durations.append({
'graphs': runnable.graphs,
'durations': durations_secondary,
'timeout': runnable.timeout,
})
result_tracker_secondary.AddRunnableDurations(
runnable, durations_secondary)
platform.PostExecution()
if args.json_test_results:
results.WriteToFile(args.json_test_results)
result_tracker.WriteToFile(args.json_test_results)
else: # pragma: no cover
print(results)
print('Primary results:', result_tracker)
if args.json_test_results_secondary:
results_secondary.WriteToFile(args.json_test_results_secondary)
result_tracker_secondary.WriteToFile(args.json_test_results_secondary)
else: # pragma: no cover
print(results_secondary)
print('Secondary results:', result_tracker_secondary)
if results.errors or have_failed_tests[0]:
if (result_tracker.errors or result_tracker_secondary.errors or
have_failed_tests[0]):
return 1
return 0
......
......@@ -144,7 +144,7 @@ class PerfTest(unittest.TestCase):
# Check that d8 is called from the correct cwd for each test run.
dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
self.assertEquals(dirs.pop(), args[0])
self.assertEqual(dirs.pop(), args[0])
os.chdir = mock.MagicMock(side_effect=chdir)
subprocess.check_call = mock.MagicMock()
......@@ -165,22 +165,24 @@ class PerfTest(unittest.TestCase):
return json.load(f)
def _VerifyResults(self, suite, units, traces, file_name=None):
self.assertEquals([
self.assertListEqual(sorted([
{'units': units,
'graphs': [suite, trace['name']],
'results': trace['results'],
'stddev': trace['stddev']} for trace in traces],
self._LoadResults(file_name)['traces'])
'stddev': trace['stddev']} for trace in traces]),
sorted(self._LoadResults(file_name)['traces']))
def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
self.assertEquals([{
self.assertListEqual([
{
'graphs': ['test'],
'durations': [42] * runs,
'timeout': timeout,
}], self._LoadResults(file_name)['runnable_durations'])
},
], self._LoadResults(file_name)['runnables'])
def _VerifyErrors(self, errors):
self.assertEquals(errors, self._LoadResults()['errors'])
self.assertListEqual(errors, self._LoadResults()['errors'])
def _VerifyMock(self, binary, *args, **kwargs):
shell = os.path.join(os.path.dirname(BASE_DIR), binary)
......@@ -191,7 +193,7 @@ class PerfTest(unittest.TestCase):
timeout=kwargs.get('timeout', 60))
def _VerifyMockMultiple(self, *args, **kwargs):
self.assertEquals(len(args), len(command.Command.call_args_list))
self.assertEqual(len(args), len(command.Command.call_args_list))
for arg, actual in zip(args, command.Command.call_args_list):
expected = {
'cmd_prefix': [],
......@@ -199,12 +201,12 @@ class PerfTest(unittest.TestCase):
'args': list(arg[1:]),
'timeout': kwargs.get('timeout', 60)
}
self.assertEquals((expected, ), actual)
self.assertTupleEqual((expected, ), actual)
def testOneRun(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(0, self._CallMain())
self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -219,7 +221,7 @@ class PerfTest(unittest.TestCase):
test_input['test_flags'] = ['2', 'test_name']
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
self.assertEquals(0, self._CallMain())
self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -237,7 +239,7 @@ class PerfTest(unittest.TestCase):
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
self.assertEquals(0, self._CallMain())
self.assertEqual(0, self._CallMain())
self._VerifyResults('v8', 'ms', [
{'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
......@@ -256,7 +258,7 @@ class PerfTest(unittest.TestCase):
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
self.assertEquals(0, self._CallMain())
self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
......@@ -274,8 +276,8 @@ class PerfTest(unittest.TestCase):
'Simple: 3 ms.\n',
'Richards: 100\n',
'Richards: 50\n'])
self.assertEquals(0, self._CallMain())
self.assertEquals([
self.assertEqual(0, self._CallMain())
self.assertListEqual(sorted([
{'units': 'score',
'graphs': ['test', 'Richards'],
'results': ['50.0', '100.0'],
......@@ -288,7 +290,7 @@ class PerfTest(unittest.TestCase):
'graphs': ['test', 'DeltaBlue'],
'results': ['200.0'],
'stddev': ''},
], self._LoadResults()['traces'])
]), sorted(self._LoadResults()['traces']))
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
......@@ -305,7 +307,7 @@ class PerfTest(unittest.TestCase):
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
self.assertEquals(0, self._CallMain())
self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': '0.23'},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': '106'},
......@@ -323,7 +325,7 @@ class PerfTest(unittest.TestCase):
'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
'Richards: 2\nRichards-stddev: 0.5\n'
'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
self.assertEquals(1, self._CallMain())
self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['2.0', '3.0'], 'stddev': '0.7'},
{'name': 'DeltaBlue', 'results': ['5.0', '6.0'], 'stddev': '0.8'},
......@@ -344,7 +346,7 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
self.assertEquals(0, self._CallMain('--buildbot'))
self.assertEqual(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -360,7 +362,7 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
self.assertEquals(0, self._CallMain('--buildbot'))
self.assertEqual(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -377,7 +379,7 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
self.assertEquals(1, self._CallMain('--buildbot'))
self.assertEqual(1, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -385,13 +387,14 @@ class PerfTest(unittest.TestCase):
self._VerifyErrors(
['Regexp "^Richards: (.+)$" '
'returned a non-numeric for test test/Richards.',
'Not all traces have the same number of results.'])
'Not all traces have the same number of results. Can not compute '
'total for test'])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(1, self._CallMain())
self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -405,7 +408,7 @@ class PerfTest(unittest.TestCase):
self._WriteTestInput(V8_JSON)
self._MockCommand(
['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'], exit_code=-1)
self.assertEquals(1, self._CallMain())
self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': [], 'stddev': ''},
......@@ -419,7 +422,7 @@ class PerfTest(unittest.TestCase):
test_input['timeout'] = 70
self._WriteTestInput(test_input)
self._MockCommand(['.'], [''], timed_out=True)
self.assertEquals(1, self._CallMain())
self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': [], 'stddev': ''},
......@@ -441,7 +444,7 @@ class PerfTest(unittest.TestCase):
mock.patch(
'run_perf.Platform.ReadBuildConfig',
return_value={'is_android': True}).start()
self.assertEquals(0, self._CallMain('--arch', 'arm'))
self.assertEqual(0, self._CallMain('--arch', 'arm'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -458,7 +461,7 @@ class PerfTest(unittest.TestCase):
'Richards: 100\nDeltaBlue: 20\n'])
test_output_secondary = os.path.join(
TEST_WORKSPACE, 'results_secondary.json')
self.assertEquals(0, self._CallMain(
self.assertEqual(0, self._CallMain(
'--outdir-secondary', 'out-secondary',
'--json-test-results-secondary', test_output_secondary,
))
......@@ -485,7 +488,7 @@ class PerfTest(unittest.TestCase):
test_input = dict(V8_JSON)
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(0, self._CallMain('--extra-flags=--prof'))
self.assertEqual(0, self._CallMain('--extra-flags=--prof'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
......@@ -499,8 +502,8 @@ class PerfTest(unittest.TestCase):
for i in [1, 2, 3]:
yield i, i + 1
l, r = run_perf.Unzip(Gen())
self.assertEquals([1, 2, 3], list(l()))
self.assertEquals([2, 3, 4], list(r()))
self.assertListEqual([1, 2, 3], list(l()))
self.assertListEqual([2, 3, 4], list(r()))
#############################################################################
### System tests
......@@ -518,8 +521,8 @@ class PerfTest(unittest.TestCase):
def testNormal(self):
results = self._RunPerf('d8_mocked1.py', 'test1.json')
self.assertEquals([], results['errors'])
self.assertEquals([
self.assertListEqual([], results['errors'])
self.assertListEqual(sorted([
{
'units': 'score',
'graphs': ['test1', 'Richards'],
......@@ -532,12 +535,12 @@ class PerfTest(unittest.TestCase):
'results': [u'2.1', u'2.1'],
'stddev': '',
},
], results['traces'])
]), sorted(results['traces']))
def testResultsProcessor(self):
results = self._RunPerf('d8_mocked2.py', 'test2.json')
self.assertEquals([], results['errors'])
self.assertEquals([
self.assertListEqual([], results['errors'])
self.assertListEqual([
{
'units': 'score',
'graphs': ['test2', 'Richards'],
......@@ -554,8 +557,8 @@ class PerfTest(unittest.TestCase):
def testResultsProcessorNested(self):
results = self._RunPerf('d8_mocked2.py', 'test3.json')
self.assertEquals([], results['errors'])
self.assertEquals([
self.assertListEqual([], results['errors'])
self.assertListEqual([
{
'units': 'score',
'graphs': ['test3', 'Octane', 'Richards'],
......