Commit 2f8e3352 authored by machenbach, committed by Commit bot

Revert of [tools] add --pretty switch to run_perf.py (patchset #7 id:120001 of https://codereview.chromium.org/1681283004/ )

Reason for revert:
[Sheriff] Breaks Android runs. Will look into why later.

Original issue's description:
> [tools] add --pretty switch to run_perf.py
>
> This CL improves running our internal benchmarks locally by adding the
> --pretty option to tools/run_perf.py. With the flag enabled we print
> the run-time of each benchmark directly and avoid the json output at
> the end.
>
> NOTRY=true
>
> Committed: https://crrev.com/83f69507ab1b9380b56758b747d4f3fabc849e49
> Cr-Commit-Position: refs/heads/master@{#33981}

TBR=cbruni@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/1696293002

Cr-Commit-Position: refs/heads/master@{#34017}
parent 84a225d1
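For context, the reverted flag only changed how results are reported: with --pretty, run_perf.py printed a human-readable line per benchmark as it ran instead of emitting the aggregated JSON at the end. A minimal standalone sketch of that per-trace formatting, based on the PlatformFormattedMixin removed below (the sample numbers and the helper name format_trace are illustrative, not part of the CL):

    # Sketch only: mirrors the mean/stddev math of the removed mixin.
    def format_trace(results, stddev, units):
      # Suites may report a precomputed stddev; results then holds one average.
      if stddev:
        mean = results[0]
      else:
        mean = sum(float(x) for x in results) / len(results)
        variance = sum((float(x) - mean) ** 2 for x in results) / len(results)
        stddev = variance ** 0.5
      percentage = 100 * stddev / mean if mean != 0 else 0
      return "%s +/- %3.2f%% %s" % (mean, percentage, units)

    print(format_trace([10, 12, 14], 0, "ms"))  # prints roughly: 12.0 +/- 13.61% ms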
@@ -405,10 +405,7 @@ class GraphConfig(Node):
     # TODO(machenbach): Currently that makes only sense for the leaf level.
     # Multiple place holders for multiple levels are not supported.
     if parent.results_regexp:
-      try:
-        regexp_default = parent.results_regexp % re.escape(suite["name"])
-      except TypeError:
-        regexp_default = parent.results_regexp
+      regexp_default = parent.results_regexp % re.escape(suite["name"])
     else:
       regexp_default = None
     self.results_regexp = suite.get("results_regexp", regexp_default)
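The try/except removed above had guarded the placeholder substitution: if a parent results_regexp contains no %s placeholder, Python's % operator raises TypeError, and the old code then fell back to the parent regexp unchanged; after the revert the substitution is applied unconditionally again. A small standalone illustration of that Python behavior (the regexp strings are hypothetical, not taken from any suite configuration):

    import re

    with_placeholder = r"^%s: (.+)$"
    without_placeholder = r"^Score: (.+)$"  # no %s placeholder

    print(with_placeholder % re.escape("Richards"))  # -> ^Richards: (.+)$
    try:
      print(without_placeholder % re.escape("Richards"))
    except TypeError:
      # "not all arguments converted during string formatting"
      print(without_placeholder)  # the fallback the old code used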
@@ -590,25 +587,6 @@ class Platform(object):
     else:
       return DesktopPlatform(options)
 
-  def GetPrettyFormatted(self, options):
-    return self
-
-  def PreExecution(self):
-    pass
-
-  def PostExecution(self):
-    pass
-
-  def PreTests(self, node, path):
-    pass
-
-  def PrintResult(self, result):
-    pass
-
-  def _PrintStdout(self, title, output):
-    print title % "Stdout"
-    print output.stdout
-
   def _Run(self, runnable, count, no_patch=False):
     raise NotImplementedError()  # pragma: no cover
@@ -631,72 +609,15 @@ class Platform(object):
     return stdout, None
 
 
-class PlatformFormattedMixin(object):
-  """
-  Helper mixin that adds formatted output used when running benchmarks
-  with the --pretty flag.
-  """
-
-  def _PrintStdout(self, title, output):
-    sys.stdout.write("\r")
-    if output.exit_code != 0:
-      print output.stdout
-      return
-    # Assume the time is on the last line
-    result_line = output.stdout.splitlines()[-1].strip()
-    sys.stdout.write(result_line)
-    # Fill with spaces up to 80 characters.
-    sys.stdout.write(' '*max(0, 80-len(result_line)))
-    sys.stdout.flush()
-
-  def _GetMean(self, trace):
-    results = trace['results']
-    if len(results) == 0:
-      return 0
-    # If the tests provided a stddev the results consists of one single average
-    # value, so return that instead.
-    if trace['stddev']:
-      return results[0]
-    # For a non-zero length results list calculate the average here.
-    return sum([float(x) for x in results]) / len(results)
-
-  def _GetDeviation(self, trace):
-    # If the benchmark provided a stddev use that directly.
-    stddev = trace['stddev']
-    if stddev:
-      return stddev
-    # If no stddev was provided calculate it from the results.
-    results = trace['results']
-    if len(results) == 0:
-      return 0
-    mean = self._GetMean(trace)
-    square_deviation = sum((float(x)-mean)**2 for x in results)
-    return (square_deviation / len(results)) ** 0.5
-
-  def PrintResult(self, result):
-    if result.errors:
-      print "\r:Errors:"
-      print "\n".join(set(result.errors))
-    else:
-      trace = result.traces[0]
-      average = self._GetMean(trace)
-      stdev = self._GetDeviation(trace)
-      stdev_percentage = 100 * stdev / average if average != 0 else 0
-      result_string = "\r %s +/- %3.2f%% %s" % (
-          average, stdev_percentage, trace['units'])
-      sys.stdout.write(result_string)
-      # Fill with spaces up to 80 characters.
-      sys.stdout.write(' '*max(0, 80-len(result_string)))
-      sys.stdout.write("\n")
-      sys.stdout.flush()
-
-
 class DesktopPlatform(Platform):
   def __init__(self, options):
     super(DesktopPlatform, self).__init__(options)
 
-  def GetPrettyFormatted(self, options):
-    return PrettyFormattedDesktopPlatform(options)
+  def PreExecution(self):
+    pass
+
+  def PostExecution(self):
+    pass
 
   def PreTests(self, node, path):
     if isinstance(node, RunnableConfig):
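One more note on the removed _PrintStdout: the pretty mode kept the console to a single updating line by writing a carriage return, printing the newest result, and padding with spaces to 80 columns so a shorter line fully overwrites a longer one. A minimal standalone demonstration of that terminal technique (the messages and sleep are made up):

    import sys
    import time

    for msg in ["Richards: 1.21", "DeltaBlue: 10657567", "done"]:
      sys.stdout.write("\r" + msg + ' ' * max(0, 80 - len(msg)))
      sys.stdout.flush()
      time.sleep(0.5)  # stand-in for an actual benchmark run
    sys.stdout.write("\n")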
@@ -715,7 +636,8 @@ class DesktopPlatform(Platform):
       print title % "OSError"
       print e
       return ""
-    self._PrintStdout(title, output)
+    print title % "Stdout"
+    print output.stdout
     if output.stderr:  # pragma: no cover
       # Print stderr for debugging.
       print title % "Stderr"
@@ -732,10 +654,6 @@ class DesktopPlatform(Platform):
     return output.stdout
 
 
-class PrettyFormattedDesktopPlatform(PlatformFormattedMixin, DesktopPlatform):
-  pass
-
-
 class AndroidPlatform(Platform):  # pragma: no cover
   DEVICE_DIR = "/data/local/tmp/v8/"
@@ -753,9 +671,6 @@ class AndroidPlatform(Platform):  # pragma: no cover
     self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
     self.device = device_utils.DeviceUtils(self.adb_wrapper)
 
-  def GetPrettyFormatted(self, options):
-    return PrettyFormattedAndroidPlatform(options)
-
   def PreExecution(self):
     perf = perf_control.PerfControl(self.device)
     perf.SetHighPerfMode()
@@ -842,10 +757,6 @@ class AndroidPlatform(Platform):  # pragma: no cover
     for resource in node.resources:
       self._PushFile(bench_abs, resource, bench_rel)
 
-  def _PrintStdout(self, title, output):
-    print title % "Stdout"
-    print "\n".join(output.stdout)
-
   def _Run(self, runnable, count, no_patch=False):
     suffix = ' - without patch' if no_patch else ''
     target_dir = "bin_no_patch" if no_patch else "bin"
@@ -869,17 +780,15 @@ class AndroidPlatform(Platform):  # pragma: no cover
           timeout=runnable.timeout,
           retries=0,
       )
-      self._PrintStdout(title, output)
+      stdout = "\n".join(output)
+      print title % "Stdout"
+      print stdout
     except device_errors.CommandTimeoutError:
       print ">>> Test timed out after %ss." % runnable.timeout
       stdout = ""
     return stdout
 
 
-class PrettyFormattedAndroidPlatform(PlatformFormattedMixin, AndroidPlatform):
-  pass
-
-
 # TODO: Implement results_processor.
 def Main(args):
   logging.getLogger().setLevel(logging.INFO)
@@ -909,9 +818,6 @@ def Main(args):
                     default="out")
   parser.add_option("--outdir-no-patch",
                     help="Base directory with compile output without patch")
-  parser.add_option("--pretty",
-                    help="Print human readable output",
-                    default=False, action="store_true")
   parser.add_option("--binary-override-path",
                     help="JavaScript engine binary. By default, d8 under "
                     "architecture-specific build dir. "
@@ -967,8 +873,6 @@ def Main(args):
     options.shell_dir_no_patch = None
 
   platform = Platform.GetPlatform(options)
-  if options.pretty:
-    platform = platform.GetPrettyFormatted(options)
 
   results = Results()
   results_no_patch = Results()
@@ -1010,7 +914,6 @@ def Main(args):
       # Let runnable iterate over all runs and handle output.
       result, result_no_patch = runnable.Run(
           Runner, trybot=options.shell_dir_no_patch)
-      platform.PrintResult(result)
       results += result
       results_no_patch += result_no_patch
     platform.PostExecution()
@@ -1018,14 +921,12 @@ def Main(args):
   if options.json_test_results:
     results.WriteToFile(options.json_test_results)
   else:  # pragma: no cover
-    if not options.pretty:
-      print results
+    print results
 
   if options.json_test_results_no_patch:
     results_no_patch.WriteToFile(options.json_test_results_no_patch)
   else:  # pragma: no cover
-    if not options.pretty:
-      print results_no_patch
+    print results_no_patch
 
   return min(1, len(results.errors))
@@ -412,7 +412,6 @@ class PerfTest(unittest.TestCase):
     platform.PreExecution = MagicMock(return_value=None)
     platform.PostExecution = MagicMock(return_value=None)
     platform.PreTests = MagicMock(return_value=None)
-    platform.PrintResult = MagicMock(return_value=None)
     platform.Run = MagicMock(
         return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
     run_perf.AndroidPlatform = MagicMock(return_value=platform)