Commit 64d7300d authored by Sergiy Belozorov, committed by Commit Bot

[tools] Refactor code to use Output objects instead of stdout in most places

This is part of the refactoring to allow exporting more information about
test execution to the recipes and uploading this information to ChromePerf.

R=machenbach@chromium.org,tmrts@chromium.org

No-Try: true
No-Tree-Checks: true
Bug: chromium:841700
Change-Id: Iab400e8922231d8eac91a6fa22ce8f45053f7ac6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1569442
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Sergiy Belozorov <sergiyb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60944}
parent 6957e23b
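The diff below threads an Output value object (imported from testrunner.objects.output) through the perf runner in place of raw stdout strings. Judging only from the keyword arguments used in this CL (exit_code, timed_out, stdout, stderr, pid, duration), the object is roughly shaped as follows; this is a sketch for orientation, not the actual testrunner class definition:

class Output(object):
  # Approximate shape of testrunner.objects.output.Output, reconstructed from
  # the constructor calls in this CL; the real class may define more fields
  # and helper methods.
  def __init__(self, exit_code=None, timed_out=False, stdout=None, stderr=None,
               pid=None, duration=None):
    self.exit_code = exit_code    # process exit code (hard-coded to 0 here until populated for real)
    self.timed_out = timed_out    # whether the run hit the timeout
    self.stdout = stdout          # raw benchmark output, previously passed around as a bare string
    self.stderr = stderr
    self.pid = pid
    self.duration = duration      # wall-clock runtime of the benchmark invocation

Consumers such as Measurement.ConsumeOutput now read output.stdout instead of receiving the string directly, which leaves room to export the extra fields to the recipes later.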
@@ -108,6 +108,7 @@ from __future__ import print_function
 from functools import reduce
 from collections import OrderedDict
+import copy
 import datetime
 import json
 import logging
@@ -122,6 +123,7 @@ import traceback
 from testrunner.local import android
 from testrunner.local import command
 from testrunner.local import utils
+from testrunner.objects.output import Output

 try:
   basestring  # Python 2
@@ -205,9 +207,9 @@ class Measurement(object):
     self.stddev = ''
     self.process_size = False

-  def ConsumeOutput(self, stdout):
+  def ConsumeOutput(self, output):
     try:
-      result = re.search(self.results_regexp, stdout, re.M).group(1)
+      result = re.search(self.results_regexp, output.stdout, re.M).group(1)
       self.results.append(str(float(result)))
     except ValueError:
       self.errors.append('Regexp "%s" returned a non-numeric for test %s.'
@@ -221,7 +223,8 @@ class Measurement(object):
         self.errors.append('Test %s should only run once since a stddev '
                            'is provided by the test.' % self.name)
       if self.stddev_regexp:
-        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
+        self.stddev = re.search(
+            self.stddev_regexp, output.stdout, re.M).group(1)
     except:
       self.errors.append('Regexp "%s" did not match for test %s.'
                          % (self.stddev_regexp, self.name))
@@ -239,7 +242,7 @@ class NullMeasurement(object):
   """Null object to avoid having extra logic for configurations that don't
   require secondary run, e.g. CI bots.
   """
-  def ConsumeOutput(self, stdout):
+  def ConsumeOutput(self, output):
     pass

   def GetResults(self):
@@ -255,10 +258,10 @@ def Unzip(iterable):
   return lambda: iter(left), lambda: iter(right)


-def RunResultsProcessor(results_processor, stdout, count):
+def RunResultsProcessor(results_processor, output, count):
   # Dummy pass through for null-runs.
-  if stdout is None:
-    return None
+  if output is None or output.stdout is None:
+    return output

   # We assume the results processor is relative to the suite.
   assert os.path.exists(results_processor)
@@ -268,13 +271,14 @@ def RunResultsProcessor(results_processor, stdout, count):
       stdout=subprocess.PIPE,
       stderr=subprocess.PIPE,
   )
-  result, _ = p.communicate(input=stdout)
-  logging.info('>>> Processed stdout (#%d):\n%s', count, result)
-  return result
+  new_output = copy.copy(output)
+  new_output.stdout, _ = p.communicate(input=output.stdout)
+  logging.info('>>> Processed stdout (#%d):\n%s', count, output.stdout)
+  return new_output


 def AccumulateResults(
-    graph_names, trace_configs, iter_output, perform_measurement, calc_total):
+    graph_names, trace_configs, output_iter, perform_measurement, calc_total):
   """Iterates over the output of multiple benchmark reruns and accumulates
   results for a configured list of traces.
@@ -283,7 +287,7 @@ def AccumulateResults(
                  ['v8', 'Octane'].
     trace_configs: List of 'TraceConfig' instances. Each trace config defines
                    how to perform a measurement.
-    iter_output: Iterator over the standard output of each test run.
+    output_iter: Iterator over the output of each test run.
     perform_measurement: Whether to actually run tests and perform measurements.
                          This is needed so that we reuse this script for both CI
                          and trybot, but want to ignore second run on CI without
@@ -293,9 +297,9 @@
   """
   measurements = [
       trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
-  for stdout in iter_output():
+  for output in output_iter():
     for measurement in measurements:
-      measurement.ConsumeOutput(stdout)
+      measurement.ConsumeOutput(output)

   res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())
@@ -322,7 +326,7 @@ def AccumulateResults(
   return res


-def AccumulateGenericResults(graph_names, suite_units, iter_output):
+def AccumulateGenericResults(graph_names, suite_units, output_iter):
   """Iterates over the output of multiple benchmark reruns and accumulates
   generic results.
@@ -330,15 +334,15 @@ def AccumulateGenericResults(graph_names, suite_units, iter_output):
     graph_names: List of names that configure the base path of the traces. E.g.
                  ['v8', 'Octane'].
     suite_units: Measurement default units as defined by the benchmark suite.
-    iter_output: Iterator over the standard output of each test run.
+    output_iter: Iterator over the output of each test run.
   Returns: A 'Results' object.
   """
   traces = OrderedDict()
-  for stdout in iter_output():
-    if stdout is None:
+  for output in output_iter():
+    if output is None:
       # The None value is used as a null object to simplify logic.
       continue
-    for line in stdout.strip().splitlines():
+    for line in output.stdout.strip().splitlines():
       match = GENERIC_RESULTS_RE.match(line)
       if match:
         stddev = ''
@@ -497,14 +501,14 @@ class RunnableConfig(GraphConfig):
   def main(self):
     return self._suite.get('main', '')

-  def PostProcess(self, stdouts_iter):
+  def PostProcess(self, outputs_iter):
     if self.results_processor:
       def it():
-        for i, stdout in enumerate(stdouts_iter()):
-          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
+        for i, output in enumerate(outputs_iter()):
+          yield RunResultsProcessor(self.results_processor, output, i + 1)
       return it
     else:
-      return stdouts_iter
+      return outputs_iter

   def ChangeCWD(self, suite_path):
     """Changes the cwd to to path defined in the current graph.
@@ -539,19 +543,19 @@ class RunnableConfig(GraphConfig):
   def Run(self, runner, trybot):
     """Iterates over several runs and handles the output for all traces."""
-    stdout, stdout_secondary = Unzip(runner())
+    output, output_secondary = Unzip(runner())
     return (
         AccumulateResults(
             self.graphs,
             self._children,
-            iter_output=self.PostProcess(stdout),
+            output_iter=self.PostProcess(output),
             perform_measurement=True,
             calc_total=self.total,
         ),
         AccumulateResults(
             self.graphs,
             self._children,
-            iter_output=self.PostProcess(stdout_secondary),
+            output_iter=self.PostProcess(output_secondary),
             perform_measurement=trybot,  # only run second time on trybots
             calc_total=self.total,
         ),
@@ -567,9 +571,9 @@ class RunnableTraceConfig(TraceConfig, RunnableConfig):
     """Iterates over several runs and handles the output."""
     measurement = self.CreateMeasurement(perform_measurement=True)
     measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
-    for stdout, stdout_secondary in runner():
-      measurement.ConsumeOutput(stdout)
-      measurement_secondary.ConsumeOutput(stdout_secondary)
+    for output, output_secondary in runner():
+      measurement.ConsumeOutput(output)
+      measurement_secondary.ConsumeOutput(output_secondary)
     return (
         measurement.GetResults(),
         measurement_secondary.GetResults(),
@@ -582,10 +586,10 @@ class RunnableGenericConfig(RunnableConfig):
     super(RunnableGenericConfig, self).__init__(suite, parent, arch)

   def Run(self, runner, trybot):
-    stdout, stdout_secondary = Unzip(runner())
+    output, output_secondary = Unzip(runner())
     return (
-        AccumulateGenericResults(self.graphs, self.units, stdout),
-        AccumulateGenericResults(self.graphs, self.units, stdout_secondary),
+        AccumulateGenericResults(self.graphs, self.units, output),
+        AccumulateGenericResults(self.graphs, self.units, output_secondary),
     )
@@ -675,7 +679,11 @@ class Platform(object):
     runnable_duration = datetime.datetime.utcnow() - runnable_start_time
     if runnable_duration.total_seconds() > 0.9 * runnable.timeout:
       runnable.has_near_timeouts = True
-    return stdout
+    # TODO(sergiyb): Move creating this object closer to the actual command
+    # execution and populate more fields with actual data.
+    return Output(
+        exit_code=0, timed_out=False, stdout=stdout, stderr=None, pid=None,
+        duration=runnable_duration)

   def Run(self, runnable, count):
     """Execute the benchmark's main file.
@@ -688,11 +696,11 @@
     Returns: A tuple with the two benchmark outputs. The latter will be None if
              options.shell_dir_secondary was not specified.
     """
-    stdout = self._TimedRun(runnable, count, secondary=False)
+    output = self._TimedRun(runnable, count, secondary=False)
     if self.shell_dir_secondary:
-      return stdout, self._TimedRun(runnable, count, secondary=True)
+      return output, self._TimedRun(runnable, count, secondary=True)
     else:
-      return stdout, None
+      return output, None


 class DesktopPlatform(Platform):
...
@@ -456,10 +456,12 @@ class PerfTest(unittest.TestCase):
     mock.patch('run_perf.AndroidPlatform.PreExecution').start()
     mock.patch('run_perf.AndroidPlatform.PostExecution').start()
     mock.patch('run_perf.AndroidPlatform.PreTests').start()
+    mock_output = Output(
+        stdout='Richards: 1.234\nDeltaBlue: 10657567\n', stderr=None,
+        timed_out=False, exit_code=0)
     mock.patch(
         'run_perf.AndroidPlatform.Run',
-        return_value=(
-            'Richards: 1.234\nDeltaBlue: 10657567\n', None)).start()
+        return_value=(mock_output, None)).start()
     mock.patch('testrunner.local.android._Driver', autospec=True).start()
     mock.patch(
         'run_perf.Platform.ReadBuildConfig',
...
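One detail worth calling out from the RunResultsProcessor change above: instead of returning the processed string, it shallow-copies the incoming Output and only replaces its stdout, so run metadata (exit code, timeout flag, duration) survives post-processing. A minimal self-contained sketch of that pattern, using a hypothetical SimpleOutput stand-in rather than the real testrunner class:

import copy

class SimpleOutput(object):
  # Hypothetical stand-in for testrunner.objects.output.Output.
  def __init__(self, stdout, exit_code=0, duration=None):
    self.stdout = stdout
    self.exit_code = exit_code
    self.duration = duration

def process(output, transform):
  # Mirrors the pattern in RunResultsProcessor: keep all metadata, swap stdout.
  if output is None or output.stdout is None:
    return output
  new_output = copy.copy(output)   # shallow copy; metadata fields carry over
  new_output.stdout = transform(output.stdout)
  return new_output

raw = SimpleOutput('Richards: 1.234\n', exit_code=0, duration=1.5)
processed = process(raw, str.upper)
assert processed.exit_code == raw.exit_code      # metadata preserved
assert processed.stdout == 'RICHARDS: 1.234\n'   # only stdout replaced
assert raw.stdout == 'Richards: 1.234\n'         # original object untouched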