Commit f7afe008 authored by Sergiy Belozorov, committed by Commit Bot

[tools] Refactor run_perf.py and run_perf_test.py

- Remove all 'from mock import ...' and 'from os import ...' member imports
  in favor of plain module imports
- Fix mocking in a few tests to prevent cross-test side-effects (see the
  sketch below)
- Add run_perf_test.py to v8_presubmit.py
- The vpython config was not added since the root .vpython already includes
  the coverage and mock libraries
- Convert all double-quoted strings to single-quoted (PS8->PS9)
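
The resulting mocking pattern, as a minimal self-contained sketch (not code
from this CL; the patched target os.getcwd and the return value are only
illustrative):

import os
import unittest

import mock


class ExampleTest(unittest.TestCase):
  def setUp(self):
    # Module-level 'import mock' instead of 'from mock import ...'; patches
    # are started per test via mock.patch(...).start().
    mock.patch('os.getcwd', return_value='/tmp').start()

  def tearDown(self):
    # Stops every patch started with .start(), so no mock leaks into the
    # next test (no cross-test side-effects).
    mock.patch.stopall()

  def test_patched_cwd(self):
    self.assertEqual('/tmp', os.getcwd())


if __name__ == '__main__':
  unittest.main()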

R=sergiyb@chromium.org

Bug: chromium:123456
Change-Id: I7b3a08dc5d950b0f51cc7a5eb3a012ea953ca824
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1564206
Commit-Queue: Sergiy Belozorov <sergiyb@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60810}
parent 0c1d1a40
@@ -125,16 +125,16 @@ except NameError: # Python 3
basestring = str
ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
"ia32",
"mips",
"mipsel",
"x64",
"arm64"]
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
SUPPORTED_ARCHS = ['arm',
'ia32',
'mips',
'mipsel',
'x64',
'arm64']
GENERIC_RESULTS_RE = re.compile(r'^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$')
RESULT_STDDEV_RE = re.compile(r'^\{([^\}]+)\}$')
RESULT_LIST_RE = re.compile(r'^\[([^\]]+)\]$')
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
INFRA_FAILURE_RETCODE = 87
@@ -163,14 +163,14 @@ class Results(object):
def ToDict(self):
return {
"traces": self.traces,
"errors": self.errors,
"timeouts": self.timeouts,
"near_timeouts": self.near_timeouts,
'traces': self.traces,
'errors': self.errors,
'timeouts': self.timeouts,
'near_timeouts': self.near_timeouts,
}
def WriteToFile(self, file_name):
with open(file_name, "w") as f:
with open(file_name, 'w') as f:
f.write(json.dumps(self.ToDict()))
def __add__(self, other):
@@ -198,7 +198,7 @@ class Measurement(object):
self.stddev_regexp = stddev_regexp
self.results = []
self.errors = []
self.stddev = ""
self.stddev = ''
self.process_size = False
def ConsumeOutput(self, stdout):
@@ -206,28 +206,28 @@ class Measurement(object):
result = re.search(self.results_regexp, stdout, re.M).group(1)
self.results.append(str(float(result)))
except ValueError:
self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
self.errors.append('Regexp "%s" returned a non-numeric for test %s.'
% (self.results_regexp, self.name))
except:
self.errors.append("Regexp \"%s\" didn't match for test %s."
self.errors.append('Regexp "%s" did not match for test %s.'
% (self.results_regexp, self.name))
try:
if self.stddev_regexp and self.stddev:
self.errors.append("Test %s should only run once since a stddev "
"is provided by the test." % self.name)
self.errors.append('Test %s should only run once since a stddev '
'is provided by the test.' % self.name)
if self.stddev_regexp:
self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
except:
self.errors.append("Regexp \"%s\" didn't match for test %s."
self.errors.append('Regexp "%s" did not match for test %s.'
% (self.stddev_regexp, self.name))
def GetResults(self):
return Results([{
"graphs": self.graphs,
"units": self.units,
"results": self.results,
"stddev": self.stddev,
'graphs': self.graphs,
'units': self.units,
'results': self.results,
'stddev': self.stddev,
}], self.errors)
@@ -265,7 +265,7 @@ def RunResultsProcessor(results_processor, stdout, count):
stderr=subprocess.PIPE,
)
result, _ = p.communicate(input=stdout)
logging.info(">>> Processed stdout (#%d):\n%s", count, result)
logging.info('>>> Processed stdout (#%d):\n%s', count, result)
return result
@@ -277,7 +277,7 @@ def AccumulateResults(
Args:
graph_names: List of names that configure the base path of the traces. E.g.
['v8', 'Octane'].
trace_configs: List of "TraceConfig" instances. Each trace config defines
trace_configs: List of 'TraceConfig' instances. Each trace config defines
how to perform a measurement.
iter_output: Iterator over the standard output of each test run.
perform_measurement: Whether to actually run tests and perform measurements.
@@ -285,7 +285,7 @@ def AccumulateResults(
and trybot, but want to ignore second run on CI without
having to spread this logic throughout the script.
calc_total: Boolean flag to specify the calculation of a summary trace.
Returns: A "Results" object.
Returns: A 'Results' object.
"""
measurements = [
trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
@@ -299,21 +299,21 @@ def AccumulateResults(
return res
# Assume all traces have the same structure.
if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
res.errors.append("Not all traces have the same number of results.")
if len(set(map(lambda t: len(t['results']), res.traces))) != 1:
res.errors.append('Not all traces have the same number of results.')
return res
# Calculate the geometric means for all traces. Above we made sure that
# there is at least one trace and that the number of results is the same
# for each trace.
n_results = len(res.traces[0]["results"])
total_results = [GeometricMean(t["results"][i] for t in res.traces)
n_results = len(res.traces[0]['results'])
total_results = [GeometricMean(t['results'][i] for t in res.traces)
for i in range(0, n_results)]
res.traces.append({
"graphs": graph_names + ["Total"],
"units": res.traces[0]["units"],
"results": total_results,
"stddev": "",
'graphs': graph_names + ['Total'],
'units': res.traces[0]['units'],
'results': total_results,
'stddev': '',
})
return res
@@ -327,7 +327,7 @@ def AccumulateGenericResults(graph_names, suite_units, iter_output):
['v8', 'Octane'].
suite_units: Measurement default units as defined by the benchmark suite.
iter_output: Iterator over the standard output of each test run.
Returns: A "Results" object.
Returns: A 'Results' object.
"""
traces = OrderedDict()
for stdout in iter_output():
@@ -337,7 +337,7 @@ def AccumulateGenericResults(graph_names, suite_units, iter_output):
for line in stdout.strip().splitlines():
match = GENERIC_RESULTS_RE.match(line)
if match:
stddev = ""
stddev = ''
graph = match.group(1)
trace = match.group(2)
body = match.group(3)
@@ -346,10 +346,10 @@ def AccumulateGenericResults(graph_names, suite_units, iter_output):
match_list = RESULT_LIST_RE.match(body)
errors = []
if match_stddev:
result, stddev = map(str.strip, match_stddev.group(1).split(","))
result, stddev = map(str.strip, match_stddev.group(1).split(','))
results = [result]
elif match_list:
results = map(str.strip, match_list.group(1).split(","))
results = map(str.strip, match_list.group(1).split(','))
else:
results = [body.strip()]
@@ -357,17 +357,17 @@ def AccumulateGenericResults(graph_names, suite_units, iter_output):
results = map(lambda r: str(float(r)), results)
except ValueError:
results = []
errors = ["Found non-numeric in %s" %
"/".join(graph_names + [graph, trace])]
errors = ['Found non-numeric in %s' %
'/'.join(graph_names + [graph, trace])]
trace_result = traces.setdefault(trace, Results([{
"graphs": graph_names + [graph, trace],
"units": (units or suite_units).strip(),
"results": [],
"stddev": "",
'graphs': graph_names + [graph, trace],
'units': (units or suite_units).strip(),
'results': [],
'stddev': '',
}], errors))
trace_result.traces[0]["results"].extend(results)
trace_result.traces[0]["stddev"] = stddev
trace_result.traces[0]['results'].extend(results)
trace_result.traces[0]['stddev'] = stddev
return reduce(lambda r, t: r + t, traces.itervalues(), Results())
@@ -383,7 +383,7 @@ class Node(object):
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
def __init__(self, binary = "d8"):
def __init__(self, binary = 'd8'):
super(DefaultSentinel, self).__init__()
self.binary = binary
self.run_count = 10
@@ -397,7 +397,7 @@ class DefaultSentinel(Node):
self.results_processor = None
self.results_regexp = None
self.stddev_regexp = None
self.units = "score"
self.units = 'score'
self.total = False
self.owners = []
@@ -411,34 +411,34 @@ class GraphConfig(Node):
super(GraphConfig, self).__init__()
self._suite = suite
assert isinstance(suite.get("path", []), list)
assert isinstance(suite.get("owners", []), list)
assert isinstance(suite["name"], basestring)
assert isinstance(suite.get("flags", []), list)
assert isinstance(suite.get("test_flags", []), list)
assert isinstance(suite.get("resources", []), list)
assert isinstance(suite.get('path', []), list)
assert isinstance(suite.get('owners', []), list)
assert isinstance(suite['name'], basestring)
assert isinstance(suite.get('flags', []), list)
assert isinstance(suite.get('test_flags', []), list)
assert isinstance(suite.get('resources', []), list)
# Accumulated values.
self.path = parent.path[:] + suite.get("path", [])
self.graphs = parent.graphs[:] + [suite["name"]]
self.flags = parent.flags[:] + suite.get("flags", [])
self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])
self.owners = parent.owners[:] + suite.get("owners", [])
self.path = parent.path[:] + suite.get('path', [])
self.graphs = parent.graphs[:] + [suite['name']]
self.flags = parent.flags[:] + suite.get('flags', [])
self.test_flags = parent.test_flags[:] + suite.get('test_flags', [])
self.owners = parent.owners[:] + suite.get('owners', [])
# Values independent of parent node.
self.resources = suite.get("resources", [])
self.resources = suite.get('resources', [])
# Discrete values (with parent defaults).
self.binary = suite.get("binary", parent.binary)
self.run_count = suite.get("run_count", parent.run_count)
self.run_count = suite.get("run_count_%s" % arch, self.run_count)
self.timeout = suite.get("timeout", parent.timeout)
self.timeout = suite.get("timeout_%s" % arch, self.timeout)
self.units = suite.get("units", parent.units)
self.total = suite.get("total", parent.total)
self.binary = suite.get('binary', parent.binary)
self.run_count = suite.get('run_count', parent.run_count)
self.run_count = suite.get('run_count_%s' % arch, self.run_count)
self.timeout = suite.get('timeout', parent.timeout)
self.timeout = suite.get('timeout_%s' % arch, self.timeout)
self.units = suite.get('units', parent.units)
self.total = suite.get('total', parent.total)
self.results_processor = suite.get(
"results_processor", parent.results_processor)
self.process_size = suite.get("process_size", parent.process_size)
'results_processor', parent.results_processor)
self.process_size = suite.get('process_size', parent.process_size)
# A regular expression for results. If the parent graph provides a
# regexp and the current suite has none, a string place holder for the
@@ -446,17 +446,17 @@ class GraphConfig(Node):
# TODO(machenbach): Currently that makes only sense for the leaf level.
# Multiple place holders for multiple levels are not supported.
if parent.results_regexp:
regexp_default = parent.results_regexp % re.escape(suite["name"])
regexp_default = parent.results_regexp % re.escape(suite['name'])
else:
regexp_default = None
self.results_regexp = suite.get("results_regexp", regexp_default)
self.results_regexp = suite.get('results_regexp', regexp_default)
# A similar regular expression for the standard deviation (optional).
if parent.stddev_regexp:
stddev_default = parent.stddev_regexp % re.escape(suite["name"])
stddev_default = parent.stddev_regexp % re.escape(suite['name'])
else:
stddev_default = None
self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
self.stddev_regexp = suite.get('stddev_regexp', stddev_default)
class TraceConfig(GraphConfig):
@@ -488,7 +488,7 @@ class RunnableConfig(GraphConfig):
@property
def main(self):
return self._suite.get("main", "")
return self._suite.get('main', '')
def PostProcess(self, stdouts_iter):
if self.results_processor:
@@ -509,17 +509,17 @@ class RunnableConfig(GraphConfig):
os.chdir(os.path.join(suite_dir, bench_dir))
def GetCommandFlags(self, extra_flags=None):
suffix = ["--"] + self.test_flags if self.test_flags else []
suffix = ['--'] + self.test_flags if self.test_flags else []
return self.flags + (extra_flags or []) + [self.main] + suffix
def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
# TODO(machenbach): This requires +.exe if run on windows.
extra_flags = extra_flags or []
if self.binary != 'd8' and '--prof' in extra_flags:
logging.info("Profiler supported only on a benchmark run with d8")
logging.info('Profiler supported only on a benchmark run with d8')
if self.process_size:
cmd_prefix = ["/usr/bin/time", "--format=MaxMemory: %MKB"] + cmd_prefix
cmd_prefix = ['/usr/bin/time', '--format=MaxMemory: %MKB'] + cmd_prefix
if self.binary.endswith('.py'):
# Copy cmd_prefix instead of update (+=).
cmd_prefix = cmd_prefix + [sys.executable]
@@ -587,23 +587,23 @@ def MakeGraphConfig(suite, arch, parent):
if isinstance(parent, RunnableConfig):
# Below a runnable can only be traces.
return TraceConfig(suite, parent, arch)
elif suite.get("main") is not None:
elif suite.get('main') is not None:
# A main file makes this graph runnable. Empty strings are accepted.
if suite.get("tests"):
if suite.get('tests'):
# This graph has subgraphs (traces).
return RunnableConfig(suite, parent, arch)
else:
# This graph has no subgraphs, it's a leaf.
return RunnableTraceConfig(suite, parent, arch)
elif suite.get("generic"):
elif suite.get('generic'):
# This is a generic suite definition. It is either a runnable executable
# or has a main js file.
return RunnableGenericConfig(suite, parent, arch)
elif suite.get("tests"):
elif suite.get('tests'):
# This is neither a leaf nor a runnable.
return GraphConfig(suite, parent, arch)
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
raise Exception('Invalid suite configuration.')
def BuildGraphConfigs(suite, arch, parent):
@@ -612,11 +612,11 @@ def BuildGraphConfigs(suite, arch, parent):
"""
# TODO(machenbach): Implement notion of cpu type?
if arch not in suite.get("archs", SUPPORTED_ARCHS):
if arch not in suite.get('archs', SUPPORTED_ARCHS):
return None
graph = MakeGraphConfig(suite, arch, parent)
for subsuite in suite.get("tests", []):
for subsuite in suite.get('tests', []):
BuildGraphConfigs(subsuite, arch, graph)
parent.AppendChild(graph)
return graph
@@ -634,7 +634,7 @@ def FlattenRunnables(node, node_cb):
for result in FlattenRunnables(child, node_cb):
yield result
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
raise Exception('Invalid suite configuration.')
class Platform(object):
@@ -697,9 +697,9 @@ class DesktopPlatform(Platform):
command.setup(utils.GuessOS(), options.device)
if options.prioritize or options.affinitize != None:
self.command_prefix = ["schedtool"]
self.command_prefix = ['schedtool']
if options.prioritize:
self.command_prefix += ["-n", "-20"]
self.command_prefix += ['-n', '-20']
if options.affinitize != None:
# schedtool expects a bit pattern when setting affinity, where each
# bit set to '1' corresponds to a core where the process may run on.
@@ -707,8 +707,8 @@
# a core number, we need to map to said bit pattern.
cpu = int(options.affinitize)
core = 1 << cpu
self.command_prefix += ["-a", ("0x%x" % core)]
self.command_prefix += ["-e"]
self.command_prefix += ['-a', ('0x%x' % core)]
self.command_prefix += ['-e']
def PreExecution(self):
pass
@@ -723,33 +723,33 @@ class DesktopPlatform(Platform):
def _Run(self, runnable, count, secondary=False):
suffix = ' - secondary' if secondary else ''
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
try:
output = cmd.execute()
except OSError: # pragma: no cover
logging.exception(title % "OSError")
logging.exception(title % 'OSError')
raise
logging.info(title % "Stdout" + "\n%s", output.stdout)
logging.info(title % 'Stdout' + '\n%s', output.stdout)
if output.stderr: # pragma: no cover
# Print stderr for debugging.
logging.info(title % "Stderr" + "\n%s", output.stderr)
logging.info(title % 'Stderr' + '\n%s', output.stderr)
if output.timed_out:
logging.warning(">>> Test timed out after %ss.", runnable.timeout)
logging.warning('>>> Test timed out after %ss.', runnable.timeout)
runnable.has_timeouts = True
raise TestFailedError()
if output.exit_code != 0:
logging.warning(">>> Test crashed.")
logging.warning('>>> Test crashed.')
raise TestFailedError()
if '--prof' in self.extra_flags:
os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
os_prefix = {'linux': 'linux', 'macos': 'mac'}.get(utils.GuessOS())
if os_prefix:
tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
subprocess.check_call(tick_tools + " --only-summary", shell=True)
tick_tools = os.path.join(TOOLS_BASE, '%s-tick-processor' % os_prefix)
subprocess.check_call(tick_tools + ' --only-summary', shell=True)
else: # pragma: no cover
logging.warning(
"Profiler option currently supported on Linux and Mac OS.")
'Profiler option currently supported on Linux and Mac OS.')
# time outputs to stderr
if runnable.process_size:
@@ -778,13 +778,13 @@ class AndroidPlatform(Platform): # pragma: no cover
bench_rel = os.path.normpath(os.path.join(*node.path))
bench_abs = os.path.join(suite_dir, bench_rel)
else:
bench_rel = "."
bench_rel = '.'
bench_abs = suite_dir
self.driver.push_executable(self.shell_dir, "bin", node.binary)
self.driver.push_executable(self.shell_dir, 'bin', node.binary)
if self.shell_dir_secondary:
self.driver.push_executable(
self.shell_dir_secondary, "bin_secondary", node.binary)
self.shell_dir_secondary, 'bin_secondary', node.binary)
if isinstance(node, RunnableConfig):
self.driver.push_file(bench_abs, node.main, bench_rel)
@@ -793,15 +793,15 @@ class AndroidPlatform(Platform): # pragma: no cover
def _Run(self, runnable, count, secondary=False):
suffix = ' - secondary' if secondary else ''
target_dir = "bin_secondary" if secondary else "bin"
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
target_dir = 'bin_secondary' if secondary else 'bin'
title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
self.driver.drop_ram_caches()
# Relative path to benchmark directory.
if runnable.path:
bench_rel = os.path.normpath(os.path.join(*runnable.path))
else:
bench_rel = "."
bench_rel = '.'
logcat_file = None
if self.options.dump_logcats_to:
@@ -820,19 +820,19 @@ class AndroidPlatform(Platform): # pragma: no cover
timeout=runnable.timeout,
logcat_file=logcat_file,
)
logging.info(title % "Stdout" + "\n%s", stdout)
logging.info(title % 'Stdout' + '\n%s', stdout)
except android.CommandFailedException as e:
logging.info(title % "Stdout" + "\n%s", e.output)
logging.info(title % 'Stdout' + '\n%s', e.output)
logging.warning('>>> Test crashed.')
raise TestFailedError()
except android.TimeoutException as e:
if e.output is not None:
logging.info(title % "Stdout" + "\n%s", e.output)
logging.warning(">>> Test timed out after %ss.", runnable.timeout)
logging.info(title % 'Stdout' + '\n%s', e.output)
logging.warning('>>> Test timed out after %ss.', runnable.timeout)
runnable.has_timeouts = True
raise TestFailedError()
if runnable.process_size:
return stdout + "MaxMemory: Unsupported"
return stdout + 'MaxMemory: Unsupported'
return stdout
class CustomMachineConfiguration:
@@ -860,44 +860,44 @@ class CustomMachineConfiguration:
@staticmethod
def GetASLR():
try:
with open("/proc/sys/kernel/randomize_va_space", "r") as f:
with open('/proc/sys/kernel/randomize_va_space', 'r') as f:
return int(f.readline().strip())
except Exception:
logging.exception("Failed to get current ASLR settings.")
logging.exception('Failed to get current ASLR settings.')
raise
@staticmethod
def SetASLR(value):
try:
with open("/proc/sys/kernel/randomize_va_space", "w") as f:
with open('/proc/sys/kernel/randomize_va_space', 'w') as f:
f.write(str(value))
except Exception:
logging.exception(
"Failed to update ASLR to %s. Are we running under sudo?", value)
'Failed to update ASLR to %s. Are we running under sudo?', value)
raise
new_value = CustomMachineConfiguration.GetASLR()
if value != new_value:
raise Exception("Present value is %s" % new_value)
raise Exception('Present value is %s' % new_value)
@staticmethod
def GetCPUCoresRange():
try:
with open("/sys/devices/system/cpu/present", "r") as f:
with open('/sys/devices/system/cpu/present', 'r') as f:
indexes = f.readline()
r = map(int, indexes.split("-"))
r = map(int, indexes.split('-'))
if len(r) == 1:
return range(r[0], r[0] + 1)
return range(r[0], r[1] + 1)
except Exception:
logging.exception("Failed to retrieve number of CPUs.")
logging.exception('Failed to retrieve number of CPUs.')
raise
@staticmethod
def GetCPUPathForId(cpu_index):
ret = "/sys/devices/system/cpu/cpu"
ret = '/sys/devices/system/cpu/cpu'
ret += str(cpu_index)
ret += "/cpufreq/scaling_governor"
ret += '/cpufreq/scaling_governor'
return ret
@staticmethod
@@ -907,17 +907,17 @@ class CustomMachineConfiguration:
ret = None
for cpu_index in cpu_indices:
cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
with open(cpu_device, "r") as f:
with open(cpu_device, 'r') as f:
# We assume the governors of all CPUs are set to the same value
val = f.readline().strip()
if ret == None:
ret = val
elif ret != val:
raise Exception("CPU cores have differing governor settings")
raise Exception('CPU cores have differing governor settings')
return ret
except Exception:
logging.exception("Failed to get the current CPU governor. Is the CPU "
"governor disabled? Check BIOS.")
logging.exception('Failed to get the current CPU governor. Is the CPU '
'governor disabled? Check BIOS.')
raise
@staticmethod
......@@ -926,123 +926,123 @@ class CustomMachineConfiguration:
cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
for cpu_index in cpu_indices:
cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
with open(cpu_device, "w") as f:
with open(cpu_device, 'w') as f:
f.write(value)
except Exception:
logging.exception("Failed to change CPU governor to %s. Are we "
"running under sudo?", value)
logging.exception('Failed to change CPU governor to %s. Are we '
'running under sudo?', value)
raise
cur_value = CustomMachineConfiguration.GetCPUGovernor()
if cur_value != value:
raise Exception("Could not set CPU governor. Present value is %s"
raise Exception('Could not set CPU governor. Present value is %s'
% cur_value )
def Main(args):
parser = optparse.OptionParser()
parser.add_option("--android-build-tools", help="Deprecated.")
parser.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="x64")
parser.add_option("--buildbot",
help="Adapt to path structure used on buildbots and adds "
"timestamps/level to all logged status messages",
default=False, action="store_true")
parser.add_option("-d", "--device",
help="The device ID to run Android tests on. If not given "
"it will be autodetected.")
parser.add_option("--extra-flags",
help="Additional flags to pass to the test executable",
default="")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--json-test-results-secondary",
"--json-test-results-no-patch", # TODO(sergiyb): Deprecate.
help="Path to a file for storing json results from run "
"without patch or for reference build run.")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
parser.add_option("--outdir-secondary",
"--outdir-no-patch", # TODO(sergiyb): Deprecate.
help="Base directory with compile output without patch or "
"for reference build")
parser.add_option("--binary-override-path",
help="JavaScript engine binary. By default, d8 under "
"architecture-specific build dir. "
"Not supported in conjunction with outdir-secondary.")
parser.add_option("--prioritize",
help="Raise the priority to nice -20 for the benchmarking "
"process.Requires Linux, schedtool, and sudo privileges.",
default=False, action="store_true")
parser.add_option("--affinitize",
help="Run benchmarking process on the specified core. "
"For example: "
"--affinitize=0 will run the benchmark process on core 0. "
"--affinitize=3 will run the benchmark process on core 3. "
"Requires Linux, schedtool, and sudo privileges.",
parser.add_option('--android-build-tools', help='Deprecated.')
parser.add_option('--arch',
help=('The architecture to run tests for, '
'"auto" or "native" for auto-detect'),
default='x64')
parser.add_option('--buildbot',
help='Adapt to path structure used on buildbots and add '
'timestamps/level to all logged status messages',
default=False, action='store_true')
parser.add_option('-d', '--device',
help='The device ID to run Android tests on. If not given '
'it will be autodetected.')
parser.add_option('--extra-flags',
help='Additional flags to pass to the test executable',
default='')
parser.add_option('--json-test-results',
help='Path to a file for storing json results.')
parser.add_option('--json-test-results-secondary',
'--json-test-results-no-patch', # TODO(sergiyb): Deprecate.
help='Path to a file for storing json results from run '
'without patch or for reference build run.')
parser.add_option('--outdir', help='Base directory with compile output',
default='out')
parser.add_option('--outdir-secondary',
'--outdir-no-patch', # TODO(sergiyb): Deprecate.
help='Base directory with compile output without patch or '
'for reference build')
parser.add_option('--binary-override-path',
help='JavaScript engine binary. By default, d8 under '
'architecture-specific build dir. '
'Not supported in conjunction with outdir-secondary.')
parser.add_option('--prioritize',
help='Raise the priority to nice -20 for the benchmarking '
'process. Requires Linux, schedtool, and sudo privileges.',
default=False, action='store_true')
parser.add_option('--affinitize',
help='Run benchmarking process on the specified core. '
'For example: '
'--affinitize=0 will run the benchmark process on core 0. '
'--affinitize=3 will run the benchmark process on core 3. '
'Requires Linux, schedtool, and sudo privileges.',
default=None)
parser.add_option("--noaslr",
help="Disable ASLR for the duration of the benchmarked "
"process. Requires Linux and sudo privileges.",
default=False, action="store_true")
parser.add_option("--cpu-governor",
help="Set cpu governor to specified policy for the "
"duration of the benchmarked process. Typical options: "
"'powersave' for more stable results, or 'performance' "
"for shorter completion time of suite, with potentially "
"more noise in results.")
parser.add_option("--filter",
help="Only run the benchmarks beginning with this string. "
"For example: "
"--filter=JSTests/TypedArrays/ will run only TypedArray "
"benchmarks from the JSTests suite.",
default="")
parser.add_option("--run-count-multiplier", default=1, type="int",
help="Multipled used to increase number of times each test "
"is retried.")
parser.add_option("--dump-logcats-to",
help="Writes logcat output from each test into specified "
"directory. Only supported for android targets.")
parser.add_option('--noaslr',
help='Disable ASLR for the duration of the benchmarked '
'process. Requires Linux and sudo privileges.',
default=False, action='store_true')
parser.add_option('--cpu-governor',
help='Set cpu governor to specified policy for the '
'duration of the benchmarked process. Typical options: '
'"powersave" for more stable results, or "performance" '
'for shorter completion time of suite, with potentially '
'more noise in results.')
parser.add_option('--filter',
help='Only run the benchmarks beginning with this string. '
'For example: '
'--filter=JSTests/TypedArrays/ will run only TypedArray '
'benchmarks from the JSTests suite.',
default='')
parser.add_option('--run-count-multiplier', default=1, type='int',
help='Multiplier used to increase the number of times each test '
'is retried.')
parser.add_option('--dump-logcats-to',
help='Writes logcat output from each test into specified '
'directory. Only supported for android targets.')
(options, args) = parser.parse_args(args)
logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s")
logging.basicConfig(level=logging.INFO, format='%(levelname)-8s %(message)s')
if len(args) == 0: # pragma: no cover
parser.print_help()
return INFRA_FAILURE_RETCODE
if options.arch in ["auto", "native"]: # pragma: no cover
if options.arch in ['auto', 'native']: # pragma: no cover
options.arch = ARCH_GUESS
if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
logging.error("Unknown architecture %s", options.arch)
logging.error('Unknown architecture %s', options.arch)
return INFRA_FAILURE_RETCODE
if (options.json_test_results_secondary and
not options.outdir_secondary): # pragma: no cover
logging.error("For writing secondary json test results, a secondary outdir "
"patch must be specified.")
logging.error('For writing secondary json test results, a secondary outdir '
'patch must be specified.')
return INFRA_FAILURE_RETCODE
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if options.buildbot:
build_config = "Release"
build_config = 'Release'
else:
build_config = "%s.release" % options.arch
build_config = '%s.release' % options.arch
if options.binary_override_path == None:
options.shell_dir = os.path.join(workspace, options.outdir, build_config)
default_binary_name = "d8"
default_binary_name = 'd8'
else:
if not os.path.isfile(options.binary_override_path):
logging.error("binary-override-path must be a file name")
logging.error('binary-override-path must be a file name')
return INFRA_FAILURE_RETCODE
if options.outdir_secondary:
logging.error("specify either binary-override-path or outdir-secondary")
logging.error('specify either binary-override-path or outdir-secondary')
return INFRA_FAILURE_RETCODE
options.shell_dir = os.path.abspath(
os.path.dirname(options.binary_override_path))
@@ -1077,14 +1077,14 @@ def Main(args):
disable_aslr = options.noaslr) as conf:
for path in args:
if not os.path.exists(path): # pragma: no cover
results.errors.append("Configuration file %s does not exist." % path)
results.errors.append('Configuration file %s does not exist.' % path)
continue
with open(path) as f:
suite = json.loads(f.read())
# If no name is given, default to the file name without .json.
suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
suite.setdefault('name', os.path.splitext(os.path.basename(path))[0])
# Setup things common to one test suite.
platform.PreExecution()
@@ -1099,11 +1099,11 @@ def Main(args):
# Traverse graph/trace tree and iterate over all runnables.
for runnable in FlattenRunnables(root, NodeCB):
runnable_name = "/".join(runnable.graphs)
runnable_name = '/'.join(runnable.graphs)
if (not runnable_name.startswith(options.filter) and
runnable_name + "/" != options.filter):
runnable_name + '/' != options.filter):
continue
logging.info(">>> Running suite: %s", runnable_name)
logging.info('>>> Running suite: %s', runnable_name)
def Runner():
"""Output generator that reruns several times."""
@@ -1152,5 +1152,5 @@ def MainWrapper():
return INFRA_FAILURE_RETCODE
if __name__ == "__main__": # pragma: no cover
if __name__ == '__main__': # pragma: no cover
sys.exit(MainWrapper())
@@ -9,12 +9,12 @@ from __future__ import print_function
from collections import namedtuple
import coverage
import json
from mock import MagicMock, patch
import mock
import os
from os import path, sys
import platform
import shutil
import subprocess
import sys
import tempfile
import unittest
@@ -25,78 +25,77 @@ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
RUN_PERF = os.path.join(BASE_DIR, 'run_perf.py')
TEST_DATA = os.path.join(BASE_DIR, 'unittests', 'testdata')
TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
TEST_WORKSPACE = os.path.join(tempfile.gettempdir(), 'test-v8-run-perf')
V8_JSON = {
"path": ["."],
"owners": ["username@chromium.org"],
"binary": "d7",
"flags": ["--flag"],
"main": "run.js",
"run_count": 1,
"results_regexp": "^%s: (.+)$",
"tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
'path': ['.'],
'owners': ['username@chromium.org'],
'binary': 'd7',
'flags': ['--flag'],
'main': 'run.js',
'run_count': 1,
'results_regexp': '^%s: (.+)$',
'tests': [
{'name': 'Richards'},
{'name': 'DeltaBlue'},
]
}
V8_NESTED_SUITES_JSON = {
"path": ["."],
"owners": ["username@chromium.org"],
"flags": ["--flag"],
"run_count": 1,
"units": "score",
"tests": [
{"name": "Richards",
"path": ["richards"],
"binary": "d7",
"main": "run.js",
"resources": ["file1.js", "file2.js"],
"run_count": 2,
"results_regexp": "^Richards: (.+)$"},
{"name": "Sub",
"path": ["sub"],
"tests": [
{"name": "Leaf",
"path": ["leaf"],
"run_count_x64": 3,
"units": "ms",
"main": "run.js",
"results_regexp": "^Simple: (.+) ms.$"},
'path': ['.'],
'owners': ['username@chromium.org'],
'flags': ['--flag'],
'run_count': 1,
'units': 'score',
'tests': [
{'name': 'Richards',
'path': ['richards'],
'binary': 'd7',
'main': 'run.js',
'resources': ['file1.js', 'file2.js'],
'run_count': 2,
'results_regexp': '^Richards: (.+)$'},
{'name': 'Sub',
'path': ['sub'],
'tests': [
{'name': 'Leaf',
'path': ['leaf'],
'run_count_x64': 3,
'units': 'ms',
'main': 'run.js',
'results_regexp': '^Simple: (.+) ms.$'},
]
},
{"name": "DeltaBlue",
"path": ["delta_blue"],
"main": "run.js",
"flags": ["--flag2"],
"results_regexp": "^DeltaBlue: (.+)$"},
{"name": "ShouldntRun",
"path": ["."],
"archs": ["arm"],
"main": "run.js"},
{'name': 'DeltaBlue',
'path': ['delta_blue'],
'main': 'run.js',
'flags': ['--flag2'],
'results_regexp': '^DeltaBlue: (.+)$'},
{'name': 'ShouldntRun',
'path': ['.'],
'archs': ['arm'],
'main': 'run.js'},
]
}
V8_GENERIC_JSON = {
"path": ["."],
"owners": ["username@chromium.org"],
"binary": "cc",
"flags": ["--flag"],
"generic": True,
"run_count": 1,
"units": "ms",
'path': ['.'],
'owners': ['username@chromium.org'],
'binary': 'cc',
'flags': ['--flag'],
'generic': True,
'run_count': 1,
'units': 'ms',
}
Output = namedtuple("Output", "stdout, stderr, timed_out, exit_code")
Output = namedtuple('Output', 'stdout, stderr, timed_out, exit_code')
class PerfTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.base = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(cls.base)
sys.path.insert(0, BASE_DIR)
cls._cov = coverage.coverage(
include=([os.path.join(cls.base, "run_perf.py")]))
include=([os.path.join(BASE_DIR, 'run_perf.py')]))
cls._cov.start()
import run_perf
from testrunner.local import command
@@ -106,56 +105,56 @@ class PerfTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
cls._cov.stop()
print("")
print('')
print(cls._cov.report())
def setUp(self):
self.maxDiff = None
if path.exists(TEST_WORKSPACE):
if os.path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
os.makedirs(TEST_WORKSPACE)
def tearDown(self):
patch.stopall()
if path.exists(TEST_WORKSPACE):
mock.patch.stopall()
if os.path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
def _WriteTestInput(self, json_content):
self._test_input = path.join(TEST_WORKSPACE, "test.json")
with open(self._test_input, "w") as f:
self._test_input = os.path.join(TEST_WORKSPACE, 'test.json')
with open(self._test_input, 'w') as f:
f.write(json.dumps(json_content))
def _MockCommand(self, *args, **kwargs):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
stderr=None,
timed_out=kwargs.get("timed_out", False),
exit_code=kwargs.get("exit_code", 0))
timed_out=kwargs.get('timed_out', False),
exit_code=kwargs.get('exit_code', 0))
for arg in args[1]]
def create_cmd(*args, **kwargs):
cmd = MagicMock()
cmd = mock.MagicMock()
def execute(*args, **kwargs):
return test_outputs.pop()
cmd.execute = MagicMock(side_effect=execute)
cmd.execute = mock.MagicMock(side_effect=execute)
return cmd
patch.object(
mock.patch.object(
run_perf.command, 'PosixCommand',
MagicMock(side_effect=create_cmd)).start()
mock.MagicMock(side_effect=create_cmd)).start()
# Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
self.assertEquals(dirs.pop(), args[0])
os.chdir = MagicMock(side_effect=chdir)
os.chdir = mock.MagicMock(side_effect=chdir)
subprocess.check_call = MagicMock()
platform.system = MagicMock(return_value='Linux')
subprocess.check_call = mock.MagicMock()
platform.system = mock.MagicMock(return_value='Linux')
def _CallMain(self, *args):
self._test_output = path.join(TEST_WORKSPACE, "results.json")
self._test_output = os.path.join(TEST_WORKSPACE, 'results.json')
all_args=[
"--json-test-results",
'--json-test-results',
self._test_output,
self._test_input,
]
@@ -168,17 +167,17 @@ class PerfTest(unittest.TestCase):
def _VerifyResults(self, suite, units, traces, file_name=None):
self.assertEquals([
{"units": units,
"graphs": [suite, trace["name"]],
"results": trace["results"],
"stddev": trace["stddev"]} for trace in traces],
self._LoadResults(file_name)["traces"])
{'units': units,
'graphs': [suite, trace['name']],
'results': trace['results'],
'stddev': trace['stddev']} for trace in traces],
self._LoadResults(file_name)['traces'])
def _VerifyErrors(self, errors):
self.assertEquals(errors, self._LoadResults()["errors"])
self.assertEquals(errors, self._LoadResults()['errors'])
def _VerifyMock(self, binary, *args, **kwargs):
shell = path.join(path.dirname(self.base), binary)
shell = os.path.join(os.path.dirname(BASE_DIR), binary)
command.Command.assert_called_with(
cmd_prefix=[],
shell=shell,
@@ -190,7 +189,7 @@ class PerfTest(unittest.TestCase):
for arg, actual in zip(args, command.Command.call_args_list):
expected = {
'cmd_prefix': [],
'shell': path.join(path.dirname(self.base), arg[0]),
'shell': os.path.join(os.path.dirname(BASE_DIR), arg[0]),
'args': list(arg[1:]),
'timeout': kwargs.get('timeout', 60)
}
@@ -198,305 +197,324 @@ class PerfTest(unittest.TestCase):
def testOneRun(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testOneRunWithTestFlags(self):
test_input = dict(V8_JSON)
test_input["test_flags"] = ["2", "test_name"]
test_input['test_flags'] = ['2', 'test_name']
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567"])
self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js",
"--", "2", "test_name")
self._VerifyMock(os.path.join(
'out', 'x64.release', 'd7'), '--flag', 'run.js', '--', '2', 'test_name')
def testTwoRuns_Units_SuiteName(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
test_input["name"] = "v8"
test_input["units"] = "ms"
test_input['run_count'] = 2
test_input['name'] = 'v8'
test_input['units'] = 'ms'
self._WriteTestInput(test_input)
self._MockCommand([".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 50\nDeltaBlue: 300\n"])
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
self.assertEquals(0, self._CallMain())
self._VerifyResults("v8", "ms", [
{"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
self._VerifyResults('v8', 'ms', [
{'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
self._VerifyMock(os.path.join(
'out', 'x64.release', 'd7'), '--flag', 'run.js')
def testTwoRuns_SubRegexp(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
del test_input["results_regexp"]
test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
test_input['run_count'] = 2
del test_input['results_regexp']
test_input['tests'][0]['results_regexp'] = '^Richards: (.+)$'
test_input['tests'][1]['results_regexp'] = '^DeltaBlue: (.+)$'
self._WriteTestInput(test_input)
self._MockCommand([".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 50\nDeltaBlue: 300\n"])
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
self._VerifyMock(os.path.join(
'out', 'x64.release', 'd7'), '--flag', 'run.js')
def testNestedSuite(self):
self._WriteTestInput(V8_NESTED_SUITES_JSON)
self._MockCommand(["delta_blue", "sub/leaf", "richards"],
["DeltaBlue: 200\n",
"Simple: 1 ms.\n",
"Simple: 2 ms.\n",
"Simple: 3 ms.\n",
"Richards: 100\n",
"Richards: 50\n"])
self._MockCommand(['delta_blue', 'sub/leaf', 'richards'],
['DeltaBlue: 200\n',
'Simple: 1 ms.\n',
'Simple: 2 ms.\n',
'Simple: 3 ms.\n',
'Richards: 100\n',
'Richards: 50\n'])
self.assertEquals(0, self._CallMain())
self.assertEquals([
{"units": "score",
"graphs": ["test", "Richards"],
"results": ["50.0", "100.0"],
"stddev": ""},
{"units": "ms",
"graphs": ["test", "Sub", "Leaf"],
"results": ["3.0", "2.0", "1.0"],
"stddev": ""},
{"units": "score",
"graphs": ["test", "DeltaBlue"],
"results": ["200.0"],
"stddev": ""},
], self._LoadResults()["traces"])
{'units': 'score',
'graphs': ['test', 'Richards'],
'results': ['50.0', '100.0'],
'stddev': ''},
{'units': 'ms',
'graphs': ['test', 'Sub', 'Leaf'],
'results': ['3.0', '2.0', '1.0'],
'stddev': ''},
{'units': 'score',
'graphs': ['test', 'DeltaBlue'],
'results': ['200.0'],
'stddev': ''},
], self._LoadResults()['traces'])
self._VerifyErrors([])
self._VerifyMockMultiple(
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
(os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
(os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
(os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
(os.path.join('out', 'x64.release', 'd8'),
'--flag', '--flag2', 'run.js'))
def testOneRunStdDevRegExp(self):
test_input = dict(V8_JSON)
test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
"DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"])
self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": "106"},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': '0.23'},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': '106'},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testTwoRunsStdDevRegExp(self):
test_input = dict(V8_JSON)
test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
test_input["run_count"] = 2
test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
test_input['run_count'] = 2
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
"DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
"Richards: 2\nRichards-stddev: 0.5\n"
"DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
self._MockCommand(['.'], ['Richards: 3\nRichards-stddev: 0.7\n'
'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
'Richards: 2\nRichards-stddev: 0.5\n'
'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["2.0", "3.0"], "stddev": "0.7"},
{"name": "DeltaBlue", "results": ["5.0", "6.0"], "stddev": "0.8"},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['2.0', '3.0'], 'stddev': '0.7'},
{'name': 'DeltaBlue', 'results': ['5.0', '6.0'], 'stddev': '0.8'},
])
self._VerifyErrors(
["Test test/Richards should only run once since a stddev is provided "
"by the test.",
"Test test/DeltaBlue should only run once since a stddev is provided "
"by the test.",
"Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
"test/DeltaBlue."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
['Test test/Richards should only run once since a stddev is provided '
'by the test.',
'Test test/DeltaBlue should only run once since a stddev is provided '
'by the test.',
'Regexp "^DeltaBlue\-stddev: (.+)$" did not match for test '
'test/DeltaBlue.'])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testBuildbot(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
self.assertEquals(0, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
self.assertEquals(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testBuildbotWithTotal(self):
test_input = dict(V8_JSON)
test_input["total"] = True
test_input['total'] = True
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
self.assertEquals(0, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
{"name": "Total", "results": ["3626.49109719"], "stddev": ""},
self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
self.assertEquals(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
{'name': 'Total', 'results': ['3626.49109719'], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testBuildbotWithTotalAndErrors(self):
test_input = dict(V8_JSON)
test_input["total"] = True
test_input['total'] = True
self._WriteTestInput(test_input)
self._MockCommand(["."], ["x\nRichards: bla\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(1, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'])
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
self.assertEquals(1, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors(
["Regexp \"^Richards: (.+)$\" "
"returned a non-numeric for test test/Richards.",
"Not all traces have the same number of results."])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
['Regexp "^Richards: (.+)$" '
'returned a non-numeric for test test/Richards.',
'Not all traces have the same number of results.'])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors(
["Regexp \"^Richards: (.+)$\" didn't match for test test/Richards."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
['Regexp "^Richards: (.+)$" did not match for test test/Richards.'])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testOneRunGeneric(self):
test_input = dict(V8_GENERIC_JSON)
self._WriteTestInput(test_input)
self._MockCommand(["."], [
"RESULT Infra: Constant1= 11 count\n"
"RESULT Infra: Constant2= [10,5,10,15] count\n"
"RESULT Infra: Constant3= {12,1.2} count\n"
"RESULT Infra: Constant4= [10,5,error,15] count\n"])
self._MockCommand(['.'], [
'RESULT Infra: Constant1= 11 count\n'
'RESULT Infra: Constant2= [10,5,10,15] count\n'
'RESULT Infra: Constant3= {12,1.2} count\n'
'RESULT Infra: Constant4= [10,5,error,15] count\n'])
self.assertEquals(1, self._CallMain())
self.assertEquals([
{"units": "count",
"graphs": ["test", "Infra", "Constant1"],
"results": ["11.0"],
"stddev": ""},
{"units": "count",
"graphs": ["test", "Infra", "Constant2"],
"results": ["10.0", "5.0", "10.0", "15.0"],
"stddev": ""},
{"units": "count",
"graphs": ["test", "Infra", "Constant3"],
"results": ["12.0"],
"stddev": "1.2"},
{"units": "count",
"graphs": ["test", "Infra", "Constant4"],
"results": [],
"stddev": ""},
], self._LoadResults()["traces"])
self._VerifyErrors(["Found non-numeric in test/Infra/Constant4"])
self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
{'units': 'count',
'graphs': ['test', 'Infra', 'Constant1'],
'results': ['11.0'],
'stddev': ''},
{'units': 'count',
'graphs': ['test', 'Infra', 'Constant2'],
'results': ['10.0', '5.0', '10.0', '15.0'],
'stddev': ''},
{'units': 'count',
'graphs': ['test', 'Infra', 'Constant3'],
'results': ['12.0'],
'stddev': '1.2'},
{'units': 'count',
'graphs': ['test', 'Infra', 'Constant4'],
'results': [],
'stddev': ''},
], self._LoadResults()['traces'])
self._VerifyErrors(['Found non-numeric in test/Infra/Constant4'])
self._VerifyMock(os.path.join('out', 'x64.release', 'cc'), '--flag', '')
def testOneRunCrashed(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(
["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"], exit_code=1)
['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'], exit_code=1)
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": [], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': [], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
def testOneRunTimingOut(self):
test_input = dict(V8_JSON)
test_input["timeout"] = 70
test_input['timeout'] = 70
self._WriteTestInput(test_input)
self._MockCommand(["."], [""], timed_out=True)
self._MockCommand(['.'], [''], timed_out=True)
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": [], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': [], 'stddev': ''},
{'name': 'DeltaBlue', 'results': [], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(
path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
'--flag', 'run.js', timeout=70)
# Simple test that mocks out the android platform. Testing the platform would
# require lots of complicated mocks for the android tools.
def testAndroid(self):
self._WriteTestInput(V8_JSON)
# FIXME(machenbach): This is not test-local!
platform = run_perf.AndroidPlatform
platform.PreExecution = MagicMock(return_value=None)
platform.PostExecution = MagicMock(return_value=None)
platform.PreTests = MagicMock(return_value=None)
platform.Run = MagicMock(
return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
run_perf.AndroidPlatform = MagicMock(return_value=platform)
with patch.object(run_perf.Platform, 'ReadBuildConfig',
MagicMock(return_value={'is_android': True})):
self.assertEquals(0, self._CallMain("--arch", "arm"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
mock.patch('run_perf.AndroidPlatform.PreExecution').start()
mock.patch('run_perf.AndroidPlatform.PostExecution').start()
mock.patch('run_perf.AndroidPlatform.PreTests').start()
mock.patch(
'run_perf.AndroidPlatform.Run',
return_value=(
'Richards: 1.234\nDeltaBlue: 10657567\n', None)).start()
mock.patch('testrunner.local.android._Driver', autospec=True).start()
mock.patch(
'run_perf.Platform.ReadBuildConfig',
return_value={'is_android': True}).start()
self.assertEquals(0, self._CallMain('--arch', 'arm'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
def testTwoRuns_Trybot(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
test_input['run_count'] = 2
self._WriteTestInput(test_input)
self._MockCommand([".", ".", ".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 200\nDeltaBlue: 20\n",
"Richards: 50\nDeltaBlue: 200\n",
"Richards: 100\nDeltaBlue: 20\n"])
test_output_secondary = path.join(TEST_WORKSPACE, "results_secondary.json")
self._MockCommand(['.', '.', '.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 200\nDeltaBlue: 20\n',
'Richards: 50\nDeltaBlue: 200\n',
'Richards: 100\nDeltaBlue: 20\n'])
test_output_secondary = os.path.join(
TEST_WORKSPACE, 'results_secondary.json')
self.assertEquals(0, self._CallMain(
"--outdir-secondary", "out-secondary",
"--json-test-results-secondary", test_output_secondary,
'--outdir-secondary', 'out-secondary',
'--json-test-results-secondary', test_output_secondary,
))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["20.0", "20.0"], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['100.0', '200.0'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['20.0', '20.0'], 'stddev': ''},
])
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
{"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['200.0', '200.0'], 'stddev': ''},
], test_output_secondary)
self._VerifyErrors([])
self._VerifyMockMultiple(
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out", "x64.release", "d7"), "--flag", "run.js"),
(path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
(os.path.join('out-secondary', 'x64.release', 'd7'),
'--flag', 'run.js'),
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
(os.path.join('out-secondary', 'x64.release', 'd7'),
'--flag', 'run.js'),
)
def testWrongBinaryWithProf(self):
test_input = dict(V8_JSON)
self._WriteTestInput(test_input)
self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(0, self._CallMain("--extra-flags=--prof"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
self.assertEquals(0, self._CallMain('--extra-flags=--prof'))
self._VerifyResults('test', 'score', [
{'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
{'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"),
"--flag", "--prof", "run.js")
self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
'--flag', '--prof', 'run.js')
def testUnzip(self):
def Gen():
@@ -510,18 +528,18 @@ class PerfTest(unittest.TestCase):
### System tests
def _RunPerf(self, mocked_d8, test_json):
output_json = path.join(TEST_WORKSPACE, "output.json")
output_json = os.path.join(TEST_WORKSPACE, 'output.json')
args = [
sys.executable, RUN_PERF,
"--binary-override-path", os.path.join(TEST_DATA, mocked_d8),
"--json-test-results", output_json,
os.sys.executable, RUN_PERF,
'--binary-override-path', os.path.join(TEST_DATA, mocked_d8),
'--json-test-results', output_json,
os.path.join(TEST_DATA, test_json),
]
subprocess.check_output(args)
return self._LoadResults(output_json)
def testNormal(self):
results = self._RunPerf("d8_mocked1.py", "test1.json")
results = self._RunPerf('d8_mocked1.py', 'test1.json')
self.assertEquals([], results['errors'])
self.assertEquals([
{
@@ -539,7 +557,7 @@ class PerfTest(unittest.TestCase):
], results['traces'])
def testResultsProcessor(self):
results = self._RunPerf("d8_mocked2.py", "test2.json")
results = self._RunPerf('d8_mocked2.py', 'test2.json')
self.assertEquals([], results['errors'])
self.assertEquals([
{
@@ -557,7 +575,7 @@ class PerfTest(unittest.TestCase):
], results['traces'])
def testResultsProcessorNested(self):
results = self._RunPerf("d8_mocked2.py", "test3.json")
results = self._RunPerf('d8_mocked2.py', 'test3.json')
self.assertEquals([], results['errors'])
self.assertEquals([
{
@@ -658,6 +658,7 @@ def PyTests(workspace):
join(workspace, 'tools', 'clusterfuzz', 'v8_foozzie_test.py'),
join(workspace, 'tools', 'release', 'test_scripts.py'),
join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
]:
print('Running ' + script)
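
For reference, a minimal sketch of how a presubmit-style loop can run each of
the listed unittest scripts, including the newly added run_perf_test.py. This
is an assumed reconstruction for illustration, not the actual v8_presubmit.py
implementation; only the script paths and the 'Running ...' message come from
the hunk above.

import os
import subprocess
import sys


def run_py_tests(workspace):
  """Runs each standalone test script; returns True if all of them pass."""
  join = os.path.join
  all_passed = True
  for script in [
      join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
      join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
  ]:
    print('Running ' + script)
    # A non-zero exit code from any test script fails the check.
    if subprocess.call([sys.executable, script], cwd=workspace) != 0:
      all_passed = False
  return all_passed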