Commit 4a58b46e authored by machenbach, committed by Commit bot

Revert of [tools] add --pretty switch to run_perf.py (patchset #8 id:140001 of https://codereview.chromium.org/1681283004/ )

Reason for revert:
[Sheriff] Still fails on Android.

Original issue's description:
> reland [tools] add --pretty switch to run_perf.py
>
> This CL improves running our internal benchmarks locally by adding the
> --pretty option to tools/run_perf.py. With the flag enabled we print
> the run-time of each benchmark directly and avoid the json output at
> the end.
>
> NOTRY=true
>
> Committed: https://crrev.com/b543c40d78c3aa8283a7d9fccb6c9e26a9a81f90
> Cr-Commit-Position: refs/heads/master@{#34023}

TBR=cbruni@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/1700953002

Cr-Commit-Position: refs/heads/master@{#34033}
parent 27204d6e
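
For context on what is being reverted: with --pretty enabled, run_perf.py printed each benchmark's run-time directly, one line per trace showing the mean plus a stddev-derived percentage, instead of the JSON-style dump at the end. Below is a minimal standalone sketch of that per-trace calculation; the function names and the sample trace are illustrative, while the trace keys ('results', 'stddev', 'units') mirror the _GetMean/_GetDeviation/PrintResult code removed in this diff. The flag would have been passed as, e.g., tools/run_perf.py --pretty <suite config> (invocation shape assumed from the option parser below; suite path illustrative).

# Sketch only -- not part of the reverted diff that follows.
def mean(trace):
  results = trace['results']
  if not results:
    return 0
  # Suites that report their own stddev emit a single pre-averaged result.
  if trace['stddev']:
    return float(results[0])
  return sum(float(x) for x in results) / len(results)

def deviation(trace):
  # Prefer a stddev reported by the suite itself.
  if trace['stddev']:
    return float(trace['stddev'])
  results = trace['results']
  if not results:
    return 0
  m = mean(trace)
  return (sum((float(x) - m) ** 2 for x in results) / len(results)) ** 0.5

def pretty_line(trace):
  avg = mean(trace)
  dev = deviation(trace)
  dev_pct = 100 * dev / avg if avg else 0
  return "%s +/- %3.2f%% %s" % (avg, dev_pct, trace['units'])

if __name__ == '__main__':
  # Hypothetical trace; real traces come from the benchmark suite configs.
  print(pretty_line({'results': ['10.0', '12.0', '11.0'],
                     'stddev': '',
                     'units': 'ms'}))
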
@@ -405,10 +405,7 @@ class GraphConfig(Node):
# TODO(machenbach): Currently that makes only sense for the leaf level.
# Multiple place holders for multiple levels are not supported.
if parent.results_regexp:
try:
regexp_default = parent.results_regexp % re.escape(suite["name"])
except TypeError:
regexp_default = parent.results_regexp
regexp_default = parent.results_regexp % re.escape(suite["name"])
else:
regexp_default = None
self.results_regexp = suite.get("results_regexp", regexp_default)
@@ -590,25 +587,6 @@ class Platform(object):
else:
return DesktopPlatform(options)
def GetPrettyFormatted(self, options):
return self
def PreExecution(self):
pass
def PostExecution(self):
pass
def PreTests(self, node, path):
pass
def PrintResult(self, result):
pass
def _PrintStdout(self, title, output):
print title % "Stdout"
print output.stdout
def _Run(self, runnable, count, no_patch=False):
raise NotImplementedError() # pragma: no cover
@@ -631,72 +609,15 @@ class Platform(object):
return stdout, None
class PlatformFormattedMixin(object):
"""
Helper mixin that adds formatted output used when running benchmarks
with the --pretty flag.
"""
def _PrintStdout(self, title, output):
sys.stdout.write("\r")
if output.exit_code != 0:
print output.stdout
return
# Assume the time is on the last line
result_line = output.stdout.splitlines()[-1].strip()
sys.stdout.write(result_line)
# Fill with spaces up to 80 characters.
sys.stdout.write(' '*max(0, 80-len(result_line)))
sys.stdout.flush()
def _GetMean(self, trace):
results = trace['results']
if len(results) == 0:
return 0
# If the tests provided a stddev the results consists of one single average
# value, so return that instead.
if trace['stddev']:
return results[0]
# For a non-zero length results list calculate the average here.
return sum([float(x) for x in results]) / len(results)
def _GetDeviation(self, trace):
# If the benchmark provided a stddev use that directly.
stddev = trace['stddev']
if stddev:
return stddev
# If no stddev was provided calculate it from the results.
results = trace['results']
if len(results) == 0:
return 0
mean = self._GetMean(trace)
square_deviation = sum((float(x)-mean)**2 for x in results)
return (square_deviation / len(results)) ** 0.5
def PrintResult(self, result):
if result.errors:
print "\r:Errors:"
print "\n".join(set(result.errors))
else:
trace = result.traces[0]
average = self._GetMean(trace)
stdev = self._GetDeviation(trace)
stdev_percentage = 100 * stdev / average if average != 0 else 0
result_string = "\r %s +/- %3.2f%% %s" % (
average, stdev_percentage, trace['units'])
sys.stdout.write(result_string)
# Fill with spaces up to 80 characters.
sys.stdout.write(' '*max(0, 80-len(result_string)))
sys.stdout.write("\n")
sys.stdout.flush()
class DesktopPlatform(Platform):
def __init__(self, options):
super(DesktopPlatform, self).__init__(options)
def GetPrettyFormatted(self, options):
return PrettyFormattedDesktopPlatform(options)
def PreExecution(self):
pass
def PostExecution(self):
pass
def PreTests(self, node, path):
if isinstance(node, RunnableConfig):
@@ -715,7 +636,8 @@ class DesktopPlatform(Platform):
print title % "OSError"
print e
return ""
self._PrintStdout(title, output)
print title % "Stdout"
print output.stdout
if output.stderr: # pragma: no cover
# Print stderr for debugging.
print title % "Stderr"
@@ -732,10 +654,6 @@ class DesktopPlatform(Platform):
return output.stdout
class PrettyFormattedDesktopPlatform(PlatformFormattedMixin, DesktopPlatform):
pass
class AndroidPlatform(Platform): # pragma: no cover
DEVICE_DIR = "/data/local/tmp/v8/"
@@ -753,9 +671,6 @@ class AndroidPlatform(Platform): # pragma: no cover
self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
self.device = device_utils.DeviceUtils(self.adb_wrapper)
def GetPrettyFormatted(self, options):
return PrettyFormattedAndroidPlatform(options)
def PreExecution(self):
perf = perf_control.PerfControl(self.device)
perf.SetHighPerfMode()
@@ -842,10 +757,6 @@ class AndroidPlatform(Platform): # pragma: no cover
for resource in node.resources:
self._PushFile(bench_abs, resource, bench_rel)
def _PrintStdout(self, title, output):
print title % "Stdout"
print "\n".join(output)
def _Run(self, runnable, count, no_patch=False):
suffix = ' - without patch' if no_patch else ''
target_dir = "bin_no_patch" if no_patch else "bin"
@@ -869,17 +780,15 @@ class AndroidPlatform(Platform): # pragma: no cover
timeout=runnable.timeout,
retries=0,
)
self._PrintStdout(title, output)
stdout = "\n".join(output)
print title % "Stdout"
print stdout
except device_errors.CommandTimeoutError:
print ">>> Test timed out after %ss." % runnable.timeout
stdout = ""
return stdout
class PrettyFormattedAndroidPlatform(PlatformFormattedMixin, AndroidPlatform):
pass
# TODO: Implement results_processor.
def Main(args):
logging.getLogger().setLevel(logging.INFO)
@@ -909,9 +818,6 @@ def Main(args):
default="out")
parser.add_option("--outdir-no-patch",
help="Base directory with compile output without patch")
parser.add_option("--pretty",
help="Print human readable output",
default=False, action="store_true")
parser.add_option("--binary-override-path",
help="JavaScript engine binary. By default, d8 under "
"architecture-specific build dir. "
@@ -967,8 +873,6 @@ def Main(args):
options.shell_dir_no_patch = None
platform = Platform.GetPlatform(options)
if options.pretty:
platform = platform.GetPrettyFormatted(options)
results = Results()
results_no_patch = Results()
@@ -1010,7 +914,6 @@ def Main(args):
# Let runnable iterate over all runs and handle output.
result, result_no_patch = runnable.Run(
Runner, trybot=options.shell_dir_no_patch)
platform.PrintResult(result)
results += result
results_no_patch += result_no_patch
platform.PostExecution()
@@ -1018,14 +921,12 @@ def Main(args):
if options.json_test_results:
results.WriteToFile(options.json_test_results)
else: # pragma: no cover
if not options.pretty:
print results
print results
if options.json_test_results_no_patch:
results_no_patch.WriteToFile(options.json_test_results_no_patch)
else: # pragma: no cover
if not options.pretty:
print results_no_patch
print results_no_patch
return min(1, len(results.errors))
@@ -412,7 +412,6 @@ class PerfTest(unittest.TestCase):
platform.PreExecution = MagicMock(return_value=None)
platform.PostExecution = MagicMock(return_value=None)
platform.PreTests = MagicMock(return_value=None)
platform.PrintResult = MagicMock(return_value=None)
platform.Run = MagicMock(
return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
run_perf.AndroidPlatform = MagicMock(return_value=platform)