Commit 116d77b4 authored by Dan Elphick, committed by Commit Bot

[tools] Add grouping to generate-runtime-callstats.py

Factors out the group regexes from callstats.py so the two tools can
share them. When --group is specified, the stats are grouped together
using the callstats.py groupings. Also adds --filter (can be supplied
multiple times) to only show certain groups.

Under the hood, this converts the simple arrays and dictionaries to use
classes to simplify the code somewhat.

Change-Id: If6b548e109212adfdf46fa04e7b21638f84a0e26
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1962864
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65429}
parent 74a68c53
...@@ -29,6 +29,7 @@ import subprocess ...@@ -29,6 +29,7 @@ import subprocess
import sys import sys
import tempfile import tempfile
import operator import operator
from callstats_groups import RUNTIME_CALL_STATS_GROUPS
import numpy import numpy
import scipy import scipy
......
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
# Ordered (group name, regex source) pairs used to bucket runtime call stats.
# Order matters: callers assign a stat to the FIRST group whose regex matches,
# so more specific patterns (e.g. the *Background* variants) must precede the
# general ones, and the ".*" catch-all must come last.
_GROUP_PATTERNS = (
    ('Group-IC', r".*IC_.*"),
    ('Group-OptimizeBackground', r".*OptimizeBackground.*"),
    ('Group-Optimize', r"StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*"),
    ('Group-CompileBackground', r"(.*CompileBackground.*)"),
    ('Group-Compile', r"(^Compile.*)|(.*_Compile.*)"),
    ('Group-ParseBackground', r".*ParseBackground.*"),
    ('Group-Parse', r".*Parse.*"),
    ('Group-Callback', r".*Callback.*"),
    ('Group-API', r".*API.*"),
    ('Group-GC-Custom', r"GC_Custom_.*"),
    ('Group-GC-Background', r".*GC.*BACKGROUND.*"),
    ('Group-GC', r"GC_.*|AllocateInTargetSpace"),
    ('Group-JavaScript', r"JS_Execution"),
    # Catch-all: any stat not matched above lands in Group-Runtime.
    ('Group-Runtime', r".*"),
)

RUNTIME_CALL_STATS_GROUPS = [
    (name, re.compile(pattern)) for name, pattern in _GROUP_PATTERNS]
...@@ -20,10 +20,17 @@ import sys ...@@ -20,10 +20,17 @@ import sys
import tempfile import tempfile
import gzip import gzip
from callstats_groups import RUNTIME_CALL_STATS_GROUPS
def parse_args(): def parse_args():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Run story and collect runtime call stats.") description="Run story and collect runtime call stats.")
parser.add_argument("story", metavar="story", nargs=1, help="story to run") parser.add_argument("story", metavar="story", nargs=1, help="story to run")
parser.add_argument(
"--group",
dest="group",
action="store_true",
help="group common stats together into buckets")
parser.add_argument( parser.add_argument(
"-r", "-r",
"--repeats", "--repeats",
...@@ -57,14 +64,22 @@ def parse_args(): ...@@ -57,14 +64,22 @@ def parse_args():
dest="format", dest="format",
action="store", action="store",
choices=["csv", "table"], choices=["csv", "table"],
help=("output as CSV")) help="output as CSV")
parser.add_argument( parser.add_argument(
"-o", "-o",
"--output", "--output",
metavar="FILE", metavar="FILE",
dest="out_file", dest="out_file",
action="store", action="store",
help=("write table to FILE rather stdout")) help="write table to FILE rather stdout")
parser.add_argument(
"--browser",
dest="browser",
metavar="BROWSER_TYPE",
action="store",
default="release",
help=("Passed directly to --browser option of run_benchmark. Ignored if "
"-executable is used"))
parser.add_argument( parser.add_argument(
"-e", "-e",
"--executable", "--executable",
...@@ -102,6 +117,11 @@ def parse_args(): ...@@ -102,6 +117,11 @@ def parse_args():
dest="stdev", dest="stdev",
action="store_true", action="store_true",
help="adds columns for the standard deviation") help="adds columns for the standard deviation")
parser.add_argument(
"--filter",
dest="filter",
action="append",
help="useable with --group to only show buckets specified by filter")
return parser.parse_args() return parser.parse_args()
...@@ -130,7 +150,7 @@ def process_trace(trace_file): ...@@ -130,7 +150,7 @@ def process_trace(trace_file):
def run_benchmark(story, repeats=1, output_dir=".", verbose=False, js_flags=None, def run_benchmark(story, repeats=1, output_dir=".", verbose=False, js_flags=None,
browser_args=None, chromium_dir=".", executable=None, browser_args=None, chromium_dir=".", executable=None,
benchmark="v8.browsing_desktop", device=None): benchmark="v8.browsing_desktop", device=None, browser="release"):
orig_chromium_dir = chromium_dir orig_chromium_dir = chromium_dir
xvfb = os.path.join(chromium_dir, "testing", "xvfb.py") xvfb = os.path.join(chromium_dir, "testing", "xvfb.py")
...@@ -155,7 +175,7 @@ def run_benchmark(story, repeats=1, output_dir=".", verbose=False, js_flags=None ...@@ -155,7 +175,7 @@ def run_benchmark(story, repeats=1, output_dir=".", verbose=False, js_flags=None
if executable: if executable:
command += ["--browser-executable", executable] command += ["--browser-executable", executable]
else: else:
command += ["--browser", "release"] command += ["--browser", browser]
if device: if device:
command += ["--device", device] command += ["--device", device]
...@@ -202,13 +222,18 @@ def run_benchmark(story, repeats=1, output_dir=".", verbose=False, js_flags=None ...@@ -202,13 +222,18 @@ def run_benchmark(story, repeats=1, output_dir=".", verbose=False, js_flags=None
print("\nrun_benchmark completed") print("\nrun_benchmark completed")
def write_output(f, table, headers, format="table"): def write_output(f, table, headers, run_count, format="table"):
if format == "csv": if format == "csv":
# strip new lines from CSV output
headers = [h.replace('\n', ' ') for h in headers]
writer = csv.writer(f) writer = csv.writer(f)
writer.writerow(headers) writer.writerow(headers)
writer.writerows(table) writer.writerows(table)
else: else:
f.write(tabulate.tabulate(table, headers=headers, floatfmt=".2f")) # First column is name, and then they alternate between counts and durations
summary_count = len(headers) - 2 * run_count - 1
floatfmt = ("",) + (".0f", ".2f") * run_count + (".2f",) * summary_count
f.write(tabulate.tabulate(table, headers=headers, floatfmt=floatfmt))
f.write("\n") f.write("\n")
...@@ -232,10 +257,120 @@ def main(): ...@@ -232,10 +257,120 @@ def main():
chromium_dir=args.chromium_dir, chromium_dir=args.chromium_dir,
benchmark=args.benchmark, benchmark=args.benchmark,
executable=args.executable, executable=args.executable,
browser=args.browser,
device=args.device) device=args.device)
outputs = {} outputs = {}
combined_output = {} combined_output = {}
if args.group:
groups = RUNTIME_CALL_STATS_GROUPS
else:
groups = []
class Row:
  """One named stat with per-run (count, duration) data plus summary stats.

  Durations are stored in ms. Summary fields (means/stdevs) stay None until
  prepare() is called, and are only computed when there is more than one run.
  """

  def __init__(self, name, run_count):
    self.name = name
    # One slot per benchmark run; filled via add_data()/add_data_point().
    self.durations = [0] * run_count
    self.counts = [0] * run_count
    self.mean_duration = None
    self.mean_count = None
    self.stdev_duration = None
    self.stdev_count = None

  def __repr__(self):
    pairs = zip(self.counts, self.durations)
    data_str = ", ".join(str(pair) for pair in pairs)
    return (f"{self.name}: {data_str}, mean_count: {self.mean_count}, " +
            f"mean_duration: {self.mean_duration}")

  def add_data(self, counts, durations):
    # Replace all per-run data wholesale (used e.g. for synthesized totals).
    self.counts = counts
    self.durations = durations

  def add_data_point(self, run, count, duration):
    # Record the measurement for a single run index.
    self.counts[run] = count
    self.durations[run] = duration

  def prepare(self, stdev=False):
    """Compute mean (and optionally stdev) columns; no-op for a single run."""
    if len(self.durations) <= 1:
      return
    self.mean_duration = statistics.mean(self.durations)
    self.mean_count = statistics.mean(self.counts)
    if stdev:
      self.stdev_duration = statistics.stdev(self.durations)
      self.stdev_count = statistics.stdev(self.counts)

  def as_list(self):
    """Flatten into a table row: name, per-run (count, duration) pairs, then
    any computed summary columns in count-mean/count-stdev/duration-mean/
    duration-stdev order (matching the table headers)."""
    result = [self.name]
    for pair in zip(self.counts, self.durations):
      result.extend(pair)
    if self.mean_duration is not None:
      result.append(self.mean_count)
      if self.stdev_count is not None:
        result.append(self.stdev_count)
      result.append(self.mean_duration)
      if self.stdev_duration is not None:
        result.append(self.stdev_duration)
    return result

  def key(self):
    # Sort key: mean duration when available, else the single run's duration.
    return self.durations[0] if self.mean_duration is None else self.mean_duration
class Bucket:
  """Collects the Rows belonging to one stats group.

  Rows accumulate in `data` (stat name -> Row); prepare() then produces a
  duration-sorted `table` plus a synthesized "Total" row.
  """

  def __init__(self, name, run_count):
    self.name = name
    self.run_count = run_count
    self.data = {}
    self.table = None      # sorted list of Rows, set by prepare()
    self.total_row = None  # "Total" Row summing all rows, set by prepare()

  def __repr__(self):
    text = "Bucket: " + self.name + " {\n"
    # Prefer the prepared table; fall back to raw data before prepare().
    if self.table:
      text += "\n ".join(str(row) for row in self.table) + "\n"
    elif self.data:
      text += "\n ".join(str(row) for row in self.data.values()) + "\n"
    if self.total_row:
      text += " " + str(self.total_row) + "\n"
    return text + "}"

  def add_data_point(self, name, run, count, duration):
    # Lazily create the Row for this stat, then record the run's measurement.
    row = self.data.setdefault(name, Row(name, self.run_count))
    row.add_data_point(run, count, duration)

  def prepare(self, stdev=False):
    """Finalize rows, sort them by duration and build the Total row."""
    if not self.data:
      return
    rows = list(self.data.values())
    for row in rows:
      row.prepare(stdev)
    self.table = sorted(rows, key=lambda r: r.key())
    totals = Row("Total", self.run_count)
    runs = range(self.run_count)
    # Per-run column sums across every row in the bucket.
    totals.add_data(
        [sum(row.counts[i] for row in rows) for i in runs],
        [sum(row.durations[i] for row in rows) for i in runs])
    totals.prepare(stdev)
    self.total_row = totals

  def as_list(self, add_bucket_titles=True, filter=None):
    """Return the bucket as table rows; [] if `filter` excludes this bucket.

    `filter`, when given, is a collection of bucket names to include.
    """
    if filter is not None and self.name not in filter:
      return []
    rows = []
    if add_bucket_titles:
      rows.append(["\n"])
      rows.append([self.name])
    rows.extend(row.as_list() for row in self.table)
    rows.append(self.total_row.as_list())
    return rows
buckets = {}
for i in range(0, args.repeats): for i in range(0, args.repeats):
story_dir = f"{story.replace(':', '_')}_{i + 1}" story_dir = f"{story.replace(':', '_')}_{i + 1}"
trace_dir = os.path.join(output_dir, story_dir, "trace", "traceEvents") trace_dir = os.path.join(output_dir, story_dir, "trace", "traceEvents")
...@@ -259,60 +394,46 @@ def main(): ...@@ -259,60 +394,46 @@ def main():
output = process_trace(trace_file) output = process_trace(trace_file)
outputs[i] = output outputs[i] = output
for name in output: for name in output:
bucket_name = "Other"
for group in groups:
if group[1].match(name):
bucket_name = group[0]
break
value = output[name] value = output[name]
if name not in combined_output: if bucket_name not in buckets:
combined_output[name] = { bucket = Bucket(bucket_name, args.repeats)
"duration": [0.0] * args.repeats, buckets[bucket_name] = bucket
"count": [0] * args.repeats else:
} bucket = buckets[bucket_name]
combined_output[name]["count"][i] = value["count"] bucket.add_data_point(name, i, value["count"], value["duration"] / 1000.0)
combined_output[name]["duration"][i] = value["duration"] / 1000.0
for b in buckets.values():
table = [] b.prepare(args.stdev)
for name in combined_output:
value = combined_output[name] def create_table(buckets, record_bucket_names=True, filter=None):
row = [name] table = []
total_count = 0 for bucket in buckets.values():
total_duration = 0 table += bucket.as_list(add_bucket_titles=record_bucket_names, filter=filter)
for i in range(0, args.repeats): return table
count = value["count"][i] table = create_table(buckets, record_bucket_names=args.group, filter=args.filter)
duration = value["duration"][i]
total_count += count headers = [""] + ["Count", "Duration\n(ms)"] * args.repeats
total_duration += duration
row += [count, duration]
if args.repeats > 1:
totals = [total_count / args.repeats]
if args.stdev:
totals += [statistics.stdev(row[1:-1:2])]
totals += [total_duration / args.repeats]
if args.stdev:
totals += [statistics.stdev(row[2:-1:2])]
row += totals
table += [row]
def sort_duration(value):
return value[-1]
table.sort(key=sort_duration)
headers = [""] + ["Count", "Duration (ms)"] * args.repeats
if args.repeats > 1: if args.repeats > 1:
if args.stdev: if args.stdev:
headers += ["Count Mean", "Count Stdev", headers += ["Count\nMean", "Count\nStdev",
"Duration Mean (ms)", "Duration Stdev"] "Duration\nMean (ms)", "Duration\nStdev (ms)"]
else: else:
headers += ["Count Mean", "Duration Mean (ms)"] headers += ["Count\nMean", "Duration\nMean (ms)"]
if args.out_file: if args.out_file:
with open(args.out_file, "w", newline="") as f: with open(args.out_file, "w", newline="") as f:
write_output(f, table, headers, args.format) write_output(f, table, headers, args.repeats, args.format)
else: else:
write_output(sys.stdout, table, headers, args.format) write_output(sys.stdout, table, headers, args.repeats, args.format)
if __name__ == '__main__': if __name__ == '__main__':
sys.exit(main()) sys.exit(main())
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment