Commit 9733dacc authored by Liviu Rau, committed by V8 LUCI CQ

[test] Refactor testrunner (3)

- removed some 'feature envy' instances:
  - created an AugmentedOptions class to take care of non-trivial option look-ups (see the sketch below)
  - moved some calculations closer to the actual data
- promoted a parameter that was passed around a lot (options) to a field in the offending class
- made similar object creations look more alike
- CommandContext provides a wrapper that ensures resource de-allocation
- separated the tests for the standard and num_fuzz runners
- added a couple more tests to improve coverage

This is still in flux, but further changes risk creating a disconnect between the original implementation and later refactorings.
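
A minimal sketch of the AugmentedOptions wrapper, for orientation only: the
real implementation lives in testrunner/utils/augmented_options.py and is not
part of this diff, so everything below except the augment() and fuzzer_rng()
call sites visible in the change is an assumption.

    import optparse
    import random

    class AugmentedOptions(optparse.Values):
      """Hosts non-trivial look-ups on top of the parsed optparse options."""

      @staticmethod
      def augment(options):
        # Re-tag the parsed Values instance so the helpers below become
        # available without copying attributes around.
        options.__class__ = AugmentedOptions
        return options

      def fuzzer_rng(self):
        # Example of a calculation kept next to the data it depends on.
        if not getattr(self, '_fuzzer_rng', None):
          self._fuzzer_rng = random.Random(self.fuzzer_random_seed)
        return self._fuzzer_rng

The runners then keep the wrapped options as the field self.options instead of
passing an options parameter to every helper.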

Bug: v8:12785
Change-Id: I0ec2e9c6a81604a5cd1d4a80982dd3329c1b48db
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3686411
Commit-Queue: Liviu Rau <liviurau@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81148}
parent 28fe6582
......@@ -20,11 +20,9 @@ from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc import progress
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.sigproc import SignalProc
from testrunner.testproc.timeout import TimeoutProc
from testrunner.testproc import util
from testrunner.utils.augmented_options import AugmentedOptions
DEFAULT_OUT_GN = 'out.gn'
......@@ -192,67 +190,48 @@ class BuildConfig(object):
return (self.asan or self.cfi_vptr or self.msan or self.tsan or
self.ubsan_vptr)
def __str__(self):
detected_options = []
if self.asan:
detected_options.append('asan')
if self.cfi_vptr:
detected_options.append('cfi_vptr')
if self.control_flow_integrity:
detected_options.append('control_flow_integrity')
if self.dcheck_always_on:
detected_options.append('dcheck_always_on')
if self.gcov_coverage:
detected_options.append('gcov_coverage')
if self.msan:
detected_options.append('msan')
if self.no_i18n:
detected_options.append('no_i18n')
if self.predictable:
detected_options.append('predictable')
if self.tsan:
detected_options.append('tsan')
if self.ubsan_vptr:
detected_options.append('ubsan_vptr')
if self.verify_csa:
detected_options.append('verify_csa')
if self.lite_mode:
detected_options.append('lite_mode')
if self.pointer_compression:
detected_options.append('pointer_compression')
if self.pointer_compression_shared_cage:
detected_options.append('pointer_compression_shared_cage')
if self.sandbox:
detected_options.append('sandbox')
if self.third_party_heap:
detected_options.append('third_party_heap')
if self.webassembly:
detected_options.append('webassembly')
if self.dict_property_const_tracking:
detected_options.append('dict_property_const_tracking')
def timeout_scalefactor(self, initial_factor):
"""Increases timeout for slow build configurations."""
factors = dict(
lite_mode = 2,
predictable = 4,
tsan = 2,
use_sanitizer = 1.5,
is_full_debug = 4,
)
result = initial_factor
for k,v in factors.items():
if getattr(self, k, False):
result *= v
if self.arch in SLOW_ARCHS:
result *= 4.5
return result
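# Illustrative arithmetic only (the numbers are assumptions, not part of this
# change): with options.timeout = 60 and a mode factor of 1, initial_factor is
# 60. A tsan build triggers both the 'tsan' (2) and 'use_sanitizer' (1.5)
# factors, giving 60 * 2 * 1.5 = 180, and an architecture listed in SLOW_ARCHS
# multiplies that by 4.5, for a final timeout of 810 seconds.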
def __str__(self):
attrs = [
'asan',
'cfi_vptr',
'control_flow_integrity',
'dcheck_always_on',
'gcov_coverage',
'msan',
'no_i18n',
'predictable',
'tsan',
'ubsan_vptr',
'verify_csa',
'lite_mode',
'pointer_compression',
'pointer_compression_shared_cage',
'sandbox',
'third_party_heap',
'webassembly',
'dict_property_const_tracking',
]
detected_options = [attr for attr in attrs if getattr(self, attr, False)]
return '\n'.join(detected_options)
def _do_load_build_config(outdir, verbose=False):
build_config_path = os.path.join(outdir, "v8_build_config.json")
if not os.path.exists(build_config_path):
if verbose:
print("Didn't find build config: %s" % build_config_path)
raise TestRunnerError()
with open(build_config_path) as f:
try:
build_config_json = json.load(f)
except Exception: # pragma: no cover
print("%s exists but contains invalid json. Is your build up-to-date?"
% build_config_path)
raise TestRunnerError()
return BuildConfig(build_config_json)
class BaseTestRunner(object):
def __init__(self, basedir=None):
self.v8_root = up(up(up(__file__)))
......@@ -262,52 +241,54 @@ class BaseTestRunner(object):
self.mode_options = None
self.target_os = None
self.infra_staging = False
self.options = None
@property
def framework_name(self):
"""String name of the base-runner subclass, used in test results."""
raise NotImplementedError()
raise NotImplementedError() # pragma: no cover
def execute(self, sys_args=None):
if sys_args is None: # pragma: no cover
sys_args = sys.argv[1:]
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
self.infra_staging = options.infra_staging
if options.swarming:
# Swarming doesn't print how isolated commands are called. Let's make
# this less cryptic by printing it ourselves.
print(' '.join(sys.argv))
# TODO(machenbach): Print used Python version until we have switched to
# Python3 everywhere.
print('Running with:')
print(sys.version)
# Kill stray processes from previous tasks on swarming.
util.kill_processes_linux()
self._load_build_config(options)
command.setup(self.target_os, options.device)
parser = self._create_parser()
self.options, args = self._parse_args(parser, sys_args)
self.infra_staging = self.options.infra_staging
if self.options.swarming:
# Swarming doesn't print how isolated commands are called. Let's make
# this less cryptic by printing it ourselves.
print(' '.join(sys.argv))
# TODO(machenbach): Print used Python version until we have switched to
# Python3 everywhere.
print('Running with:')
print(sys.version)
# Kill stray processes from previous tasks on swarming.
util.kill_processes_linux()
try:
self._load_build_config()
try:
self._process_default_options(options)
self._process_options(options)
self._process_default_options()
self._process_options()
except TestRunnerError:
parser.print_help()
raise
args = self._parse_test_args(args)
tests = self._load_testsuite_generators(args, options)
self._setup_env()
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_options.label))
exit_code = self._do_execute(tests, args, options)
if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
exit_code = utils.EXIT_CODE_PASS
with command.command_context(self.target_os, self.options.device):
names = self._args_to_suite_names(args)
tests = self._load_testsuite_generators(names)
self._setup_env()
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_options.label))
exit_code = self._do_execute(tests, args)
if exit_code == utils.EXIT_CODE_FAILURES and self.options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
exit_code = utils.EXIT_CODE_PASS
return exit_code
except TestRunnerError:
traceback.print_exc()
......@@ -317,8 +298,7 @@ class BaseTestRunner(object):
except Exception:
traceback.print_exc()
return utils.EXIT_CODE_INTERNAL_ERROR
finally:
command.tear_down()
def _create_parser(self):
parser = optparse.OptionParser()
......@@ -423,7 +403,7 @@ class BaseTestRunner(object):
"setting this option indicates manual usage.")
def _add_parser_options(self, parser):
pass
pass # pragma: no cover
def _parse_args(self, parser, sys_args):
options, args = parser.parse_args(sys_args)
......@@ -432,12 +412,12 @@ class BaseTestRunner(object):
print('Multiple architectures are deprecated')
raise TestRunnerError()
return options, args
return AugmentedOptions.augment(options), args
def _load_build_config(self, options):
for outdir in self._possible_outdirs(options):
def _load_build_config(self):
for outdir in self._possible_outdirs():
try:
self.build_config = _do_load_build_config(outdir, options.verbose)
self.build_config = self._do_load_build_config(outdir)
# In auto-detect mode the outdir is always where we found the build config.
# This ensures that we'll also take the build products from there.
......@@ -462,20 +442,37 @@ class BaseTestRunner(object):
else:
self.target_os = utils.GuessOS()
def _do_load_build_config(self, outdir):
build_config_path = os.path.join(outdir, "v8_build_config.json")
if not os.path.exists(build_config_path):
if self.options.verbose:
print("Didn't find build config: %s" % build_config_path)
raise TestRunnerError()
with open(build_config_path) as f:
try:
build_config_json = json.load(f)
except Exception: # pragma: no cover
print("%s exists but contains invalid json. Is your build up-to-date?"
% build_config_path)
raise TestRunnerError()
return BuildConfig(build_config_json)
# Returns possible build paths in order:
# gn
# outdir
# outdir on bots
def _possible_outdirs(self, options):
def _possible_outdirs(self):
def outdirs():
if options.gn:
if self.options.gn:
yield self._get_gn_outdir()
return
yield options.outdir
yield self.options.outdir
if os.path.basename(options.outdir) != 'build':
yield os.path.join(options.outdir, 'build')
if os.path.basename(self.options.outdir) != 'build':
yield os.path.join(self.options.outdir, 'build')
for outdir in outdirs():
yield os.path.join(self.basedir, outdir)
......@@ -495,7 +492,7 @@ class BaseTestRunner(object):
print(">>> Latest GN build found: %s" % latest_config)
return os.path.join(DEFAULT_OUT_GN, latest_config)
def _process_default_options(self, options):
def _process_default_options(self):
if self.build_config.is_debug:
self.mode_options = DEBUG_MODE
elif self.build_config.dcheck_always_on:
......@@ -503,27 +500,27 @@ class BaseTestRunner(object):
else:
self.mode_options = RELEASE_MODE
if options.arch and options.arch != self.build_config.arch:
if self.options.arch and self.options.arch != self.build_config.arch:
print('--arch value (%s) inconsistent with build config (%s).' % (
options.arch, self.build_config.arch))
self.options.arch, self.build_config.arch))
raise TestRunnerError()
if options.shell_dir: # pragma: no cover
if self.options.shell_dir: # pragma: no cover
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
if options.j == 0:
if self.options.j == 0:
if self.build_config.is_android:
# Adb isn't happy about multi-processed file pushing.
options.j = 1
self.options.j = 1
else:
options.j = multiprocessing.cpu_count()
self.options.j = multiprocessing.cpu_count()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(list(map(shlex.split, options.extra_flags)), [])
self.options.command_prefix = shlex.split(self.options.command_prefix)
self.options.extra_flags = sum(list(map(shlex.split, self.options.extra_flags)), [])
def _process_options(self, options):
pass
def _process_options(self):
pass # pragma: no cover
def _setup_env(self):
# Use the v8 root as cwd as some test cases use "load" with relative paths.
......@@ -609,41 +606,40 @@ class BaseTestRunner(object):
return reduce(list.__add__, list(map(expand_test_group, args)), [])
def _args_to_suite_names(self, args, test_root):
def _args_to_suite_names(self, args):
# Use default tests if no test configuration was provided at the cmd line.
all_names = set(utils.GetSuitePaths(test_root))
all_names = set(utils.GetSuitePaths(self.options.test_root))
args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
return [name for name in args_names if name in all_names]
def _get_default_suite_names(self):
return []
return [] # pragma: no cover
def _load_testsuite_generators(self, args, options):
names = self._args_to_suite_names(args, options.test_root)
test_config = self._create_test_config(options)
variables = self._get_statusfile_variables(options)
def _load_testsuite_generators(self, names):
test_config = self._create_test_config()
variables = self._get_statusfile_variables()
# Head generator with no elements
test_chain = testsuite.TestGenerator(0, [], [])
for name in names:
if options.verbose:
if self.options.verbose:
print('>>> Loading test suite: %s' % name)
suite = testsuite.TestSuite.Load(
os.path.join(options.test_root, name), test_config,
os.path.join(self.options.test_root, name), test_config,
self.framework_name)
if self._is_testsuite_supported(suite, options):
if self._is_testsuite_supported(suite):
tests = suite.load_tests_from_disk(variables)
test_chain.merge(tests)
return test_chain
def _is_testsuite_supported(self, suite, options):
def _is_testsuite_supported(self, suite):
"""A predicate that can be overridden to filter out unsupported TestSuite
instances (see NumFuzzer for usage)."""
return True
def _get_statusfile_variables(self, options):
def _get_statusfile_variables(self):
simd_mips = (
self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
self.build_config.mips_arch_variant == "r6" and
......@@ -654,7 +650,7 @@ class BaseTestRunner(object):
self.build_config.mips_arch_variant)
no_simd_hardware = any(
i in options.extra_flags for i in ['--noenable-sse3',
i in self.options.extra_flags for i in ['--noenable-sse3',
'--no-enable-sse3',
'--noenable-ssse3',
'--no-enable-ssse3',
......@@ -696,21 +692,21 @@ class BaseTestRunner(object):
"gc_stress": False,
"gcov_coverage": self.build_config.gcov_coverage,
"has_webassembly": self.build_config.webassembly,
"isolates": options.isolates,
"isolates": self.options.isolates,
"is_clang": self.build_config.is_clang,
"is_full_debug": self.build_config.is_full_debug,
"mips_arch_variant": mips_arch_variant,
"mode": self.mode_options.status_mode,
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_harness": self.options.no_harness,
"no_i18n": self.build_config.no_i18n,
"no_simd_hardware": no_simd_hardware,
"novfp3": False,
"optimize_for_size": "--optimize-for-size" in options.extra_flags,
"optimize_for_size": "--optimize-for-size" in self.options.extra_flags,
"predictable": self.build_config.predictable,
"simd_mips": simd_mips,
"simulator_run": self.build_config.simulator_run and
not options.dont_skip_simulator_slow_tests,
not self.options.dont_skip_simulator_slow_tests,
"system": self.target_os,
"third_party_heap": self.build_config.third_party_heap,
"tsan": self.build_config.tsan,
......@@ -728,46 +724,29 @@ class BaseTestRunner(object):
def _runner_flags(self):
"""Extra default flags specific to the test runner implementation."""
return []
return [] # pragma: no cover
def _create_test_config(self, options):
timeout = options.timeout * self._timeout_scalefactor(options)
def _create_test_config(self):
timeout = self.build_config.timeout_scalefactor(
self.options.timeout * self.mode_options.timeout_scalefactor)
return TestConfig(
command_prefix=options.command_prefix,
extra_flags=options.extra_flags,
isolates=options.isolates,
command_prefix=self.options.command_prefix,
extra_flags=self.options.extra_flags,
isolates=self.options.isolates,
mode_flags=self.mode_options.flags + self._runner_flags(),
no_harness=options.no_harness,
no_harness=self.options.no_harness,
noi18n=self.build_config.no_i18n,
random_seed=options.random_seed,
run_skipped=options.run_skipped,
random_seed=self.options.random_seed,
run_skipped=self.options.run_skipped,
shell_dir=self.outdir,
timeout=timeout,
verbose=options.verbose,
regenerate_expected_files=options.regenerate_expected_files,
verbose=self.options.verbose,
regenerate_expected_files=self.options.regenerate_expected_files,
)
def _timeout_scalefactor(self, options):
"""Increases timeout for slow build configurations."""
factor = self.mode_options.timeout_scalefactor
if self.build_config.arch in SLOW_ARCHS:
factor *= 4.5
if self.build_config.lite_mode:
factor *= 2
if self.build_config.predictable:
factor *= 4
if self.build_config.tsan:
factor *= 2
if self.build_config.use_sanitizer:
factor *= 1.5
if self.build_config.is_full_debug:
factor *= 4
return factor
# TODO(majeski): remove options & args parameters
def _do_execute(self, suites, args, options):
raise NotImplementedError()
def _do_execute(self, suites, args):
raise NotImplementedError() # pragma: no cover
def _prepare_procs(self, procs):
procs = list([_f for _f in procs if _f])
......@@ -775,78 +754,16 @@ class BaseTestRunner(object):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
def _create_shard_proc(self, options):
myid, count = self._get_shard_info(options)
if count == 1:
return None
return ShardProc(myid - 1, count)
def _get_shard_info(self, options):
"""
Returns pair:
(id of the current shard [1; number of shards], number of shards)
"""
# Read gtest shard configuration from environment (e.g. set by swarming).
# If none is present, use values passed on the command line.
shard_count = int(
os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
shard_run = os.environ.get('GTEST_SHARD_INDEX')
if shard_run is not None:
# The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
shard_run = int(shard_run) + 1
else:
shard_run = options.shard_run
if options.shard_count > 1:
# Log if a value was passed on the cmd line and it differs from the
# environment variables.
if options.shard_count != shard_count: # pragma: no cover
print("shard_count from cmd line differs from environment variable "
"GTEST_TOTAL_SHARDS")
if (options.shard_run > 1 and
options.shard_run != shard_run): # pragma: no cover
print("shard_run from cmd line differs from environment variable "
"GTEST_SHARD_INDEX")
if shard_run < 1 or shard_run > shard_count:
# TODO(machenbach): Turn this into an assert. If that's wrong on the
# bots, printing will be quite useless. Or refactor this code to make
# sure we get a return code != 0 after testing if we got here.
print("shard-run not a valid number, should be in [1:shard-count]")
print("defaulting back to running all tests")
return 1, 1
return shard_run, shard_count
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
if options.json_test_results:
def _create_progress_indicators(self, test_count):
procs = [PROGRESS_INDICATORS[self.options.progress]()]
if self.options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
for proc in procs:
proc.configure(options)
for proc in procs:
try:
proc.set_test_count(test_count)
except AttributeError:
pass
proc.configure(self.options)
proc.set_test_count(test_count)
return procs
def _create_result_tracker(self, options):
return progress.ResultsTracker(options.exit_after_n_failures)
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
def _create_signal_proc(self):
return SignalProc()
def _create_rerun_proc(self, options):
if not options.rerun_failures_count:
return None
return RerunProc(options.rerun_failures_count,
options.rerun_failures_max)
......@@ -327,6 +327,40 @@ class AndroidCommand(BaseCommand):
Command = None
class CommandContext():
def __init__(self, command):
self.command = command
@contextmanager
def context(self, device):
yield
class AndroidContext():
def __init__(self):
self.command = AndroidCommand
@contextmanager
def context(self, device):
try:
AndroidCommand.driver = android_driver(device)
yield
finally:
AndroidCommand.driver.tear_down()
@contextmanager
def command_context(target_os, device):
factory = dict(
android=AndroidContext(),
windows=CommandContext(WindowsCommand),
)
context = factory.get(target_os, CommandContext(PosixCommand))
with context.context(device):
global Command
Command = context.command
yield
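# Usage sketch (mirrors BaseTestRunner.execute in this change; run_tests below
# is just a stand-in name):
#
#   with command_context(target_os, device):
#     run_tests()
#
# The context manager installs the OS-specific Command class and, on Android,
# guarantees that the device driver is torn down even if the body raises.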
# Deprecated: use command_context
def setup(target_os, device):
"""Set the Command class to the OS-specific version."""
global Command
......@@ -338,6 +372,7 @@ def setup(target_os, device):
else:
Command = PosixCommand
# Deprecated: use command_context
def tear_down():
"""Clean up after using commands."""
if Command == AndroidCommand:
......
......@@ -4,7 +4,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import sys
# Adds testrunner to the path, hence it has to be imported at the beginning.
......@@ -19,6 +18,10 @@ from testrunner.testproc.expectation import ExpectationProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.utils import random_utils
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.timeout import TimeoutProc
from testrunner.testproc.progress import ResultsTracker
from testrunner.testproc.shard import ShardProc
DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
......@@ -97,20 +100,20 @@ class NumFuzzer(base_runner.BaseTestRunner):
return parser
def _process_options(self, options):
if not options.fuzzer_random_seed:
options.fuzzer_random_seed = random_utils.random_seed()
def _process_options(self):
if not self.options.fuzzer_random_seed:
self.options.fuzzer_random_seed = random_utils.random_seed()
if options.total_timeout_sec:
options.tests_count = 0
if self.options.total_timeout_sec:
self.options.tests_count = 0
if options.combine_tests:
if options.combine_min > options.combine_max:
if self.options.combine_tests:
if self.options.combine_min > self.options.combine_max:
print('min_group_size (%d) cannot be larger than max_group_size (%d)' %
(self.options.combine_min, self.options.combine_max))
raise base_runner.TestRunnerError()
if options.variants != 'default':
if self.options.variants != 'default':
print ('Only default testing variant is supported with numfuzz')
raise base_runner.TestRunnerError()
......@@ -125,56 +128,54 @@ class NumFuzzer(base_runner.BaseTestRunner):
'--exit-on-contradictory-flags', '--testing-d8-test-runner', '--no-fail'
]
def _get_statusfile_variables(self, options):
def _get_statusfile_variables(self):
variables = (
super(NumFuzzer, self)._get_statusfile_variables(options))
super(NumFuzzer, self)._get_statusfile_variables())
variables.update({
'deopt_fuzzer': bool(options.stress_deopt),
'endurance_fuzzer': bool(options.combine_tests),
'gc_stress': bool(options.stress_gc),
'gc_fuzzer': bool(max([options.stress_marking,
options.stress_scavenge,
options.stress_compaction,
options.stress_gc,
options.stress_delay_tasks,
options.stress_stack_size,
options.stress_thread_pool_size])),
'deopt_fuzzer': bool(self.options.stress_deopt),
'endurance_fuzzer': bool(self.options.combine_tests),
'gc_stress': bool(self.options.stress_gc),
'gc_fuzzer': bool(max([self.options.stress_marking,
self.options.stress_scavenge,
self.options.stress_compaction,
self.options.stress_gc,
self.options.stress_delay_tasks,
self.options.stress_stack_size,
self.options.stress_thread_pool_size])),
})
return variables
def _do_execute(self, tests, args, options):
def _do_execute(self, tests, args):
loader = LoadProc(tests)
fuzzer_rng = random.Random(options.fuzzer_random_seed)
combiner = self._create_combiner(fuzzer_rng, options)
results = self._create_result_tracker(options)
execproc = ExecutionProc(options.j)
combiner = CombinerProc.create(self.options)
results = ResultsTracker.create(self.options)
execproc = ExecutionProc(self.options.j)
sigproc = self._create_signal_proc()
indicators = self._create_progress_indicators(
tests.test_count_estimate, options)
tests.test_count_estimate)
procs = [
loader,
NameFilterProc(args) if args else None,
StatusFileFilterProc(None, None),
# TODO(majeski): Improve sharding when combiner is present. Maybe select
# different random seeds for shards instead of splitting tests.
self._create_shard_proc(options),
ShardProc.create(self.options),
ExpectationProc(),
combiner,
self._create_fuzzer(fuzzer_rng, options),
fuzzer.FuzzerProc.create(self.options),
sigproc,
] + indicators + [
results,
self._create_timeout_proc(options),
self._create_rerun_proc(options),
TimeoutProc.create(self.options),
RerunProc.create(self.options),
execproc,
]
self._prepare_procs(procs)
loader.load_initial_tests(initial_batch_size=float('inf'))
loader.load_initial_tests()
# TODO(majeski): maybe some notification from loader would be better?
if combiner:
combiner.generate_initial_tests(options.j * 4)
combiner.generate_initial_tests(self.options.j * 4)
# This starts up worker processes and blocks until all tests are
# processed.
......@@ -190,48 +191,9 @@ class NumFuzzer(base_runner.BaseTestRunner):
# Indicate if a SIGINT or SIGTERM happened.
return sigproc.exit_code
def _is_testsuite_supported(self, suite, options):
return not options.combine_tests or suite.test_combiner_available()
def _create_combiner(self, rng, options):
if not options.combine_tests:
return None
return CombinerProc(rng, options.combine_min, options.combine_max,
options.tests_count)
def _create_fuzzer(self, rng, options):
return fuzzer.FuzzerProc(
rng,
self._tests_count(options),
self._create_fuzzer_configs(options),
self._disable_analysis(options),
)
def _tests_count(self, options):
if options.combine_tests:
return 1
return options.tests_count
def _disable_analysis(self, options):
"""Disable analysis phase when options are used that don't support it."""
return options.combine_tests
def _create_fuzzer_configs(self, options):
fuzzers = []
def add(name, prob, *args):
if prob:
fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
add('compaction', options.stress_compaction)
add('marking', options.stress_marking)
add('scavenge', options.stress_scavenge)
add('gc_interval', options.stress_gc)
add('stack', options.stress_stack_size)
add('threads', options.stress_thread_pool_size)
add('delay', options.stress_delay_tasks)
add('deopt', options.stress_deopt, options.stress_deopt_min)
return fuzzers
def _is_testsuite_supported(self, suite):
return not self.options.combine_tests or suite.test_combiner_available()
if __name__ == '__main__':
sys.exit(NumFuzzer().execute())
sys.exit(NumFuzzer().execute()) # pragma: no cover
......@@ -11,8 +11,15 @@ import json
import os
import sys
import tempfile
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.timeout import TimeoutProc
from testrunner.testproc.progress import ResultsTracker
from testrunner.testproc.shard import ShardProc
# Adds testrunner to the path, hence it has to be imported at the beginning.
TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(TOOLS_PATH)
import testrunner.base_runner as base_runner
from testrunner.local import utils
......@@ -147,85 +154,85 @@ class StandardTestRunner(base_runner.BaseTestRunner):
def _predictable_wrapper(self):
return os.path.join(self.v8_root, 'tools', 'predictable_wrapper.py')
def _process_options(self, options):
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
def _process_options(self):
if self.options.sancov_dir:
self.sancov_dir = self.options.sancov_dir
if not os.path.exists(self.sancov_dir):
print('sancov-dir %s doesn\'t exist' % self.sancov_dir)
raise base_runner.TestRunnerError()
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
if self.options.gc_stress:
self.options.extra_flags += GC_STRESS_FLAGS
if options.random_gc_stress:
options.extra_flags += RANDOM_GC_STRESS_FLAGS
if self.options.random_gc_stress:
self.options.extra_flags += RANDOM_GC_STRESS_FLAGS
if self.build_config.asan:
options.extra_flags.append('--invoke-weak-callbacks')
self.options.extra_flags.append('--invoke-weak-callbacks')
if options.novfp3:
options.extra_flags.append('--noenable-vfp3')
if self.options.novfp3:
self.options.extra_flags.append('--noenable-vfp3')
if options.no_variants: # pragma: no cover
if self.options.no_variants: # pragma: no cover
print ('Option --no-variants is deprecated. '
'Pass --variants=default instead.')
assert not options.variants
options.variants = 'default'
assert not self.options.variants
self.options.variants = 'default'
if options.exhaustive_variants: # pragma: no cover
if self.options.exhaustive_variants: # pragma: no cover
# TODO(machenbach): Switch infra to --variants=exhaustive after M65.
print ('Option --exhaustive-variants is deprecated. '
'Pass --variants=exhaustive instead.')
# This is used on many bots. It includes a larger set of default
# variants.
# Other options for manipulating variants still apply afterwards.
assert not options.variants
options.variants = 'exhaustive'
assert not self.options.variants
self.options.variants = 'exhaustive'
if options.quickcheck:
assert not options.variants
options.variants = 'stress,default'
options.slow_tests = 'skip'
options.pass_fail_tests = 'skip'
if self.options.quickcheck:
assert not self.options.variants
self.options.variants = 'stress,default'
self.options.slow_tests = 'skip'
self.options.pass_fail_tests = 'skip'
if self.build_config.predictable:
options.variants = 'default'
options.extra_flags.append('--predictable')
options.extra_flags.append('--verify-predictable')
options.extra_flags.append('--no-inline-new')
self.options.variants = 'default'
self.options.extra_flags.append('--predictable')
self.options.extra_flags.append('--verify-predictable')
self.options.extra_flags.append('--no-inline-new')
# Add predictable wrapper to command prefix.
options.command_prefix = (
[sys.executable, self._predictable_wrapper()] + options.command_prefix)
self.options.command_prefix = (
[sys.executable, self._predictable_wrapper()] + self.options.command_prefix)
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
if self.build_config.msan:
options.variants = 'default'
self.options.variants = 'default'
if options.variants == 'infra_staging':
options.variants = 'exhaustive'
if self.options.variants == 'infra_staging':
self.options.variants = 'exhaustive'
self._variants = self._parse_variants(options.variants)
self._variants = self._parse_variants(self.options.variants)
def CheckTestMode(name, option): # pragma: no cover
if option not in ['run', 'skip', 'dontcare']:
print('Unknown %s mode %s' % (name, option))
raise base_runner.TestRunnerError()
CheckTestMode('slow test', options.slow_tests)
CheckTestMode('pass|fail test', options.pass_fail_tests)
CheckTestMode('slow test', self.options.slow_tests)
CheckTestMode('pass|fail test', self.options.pass_fail_tests)
if self.build_config.no_i18n:
base_runner.TEST_MAP['bot_default'].remove('intl')
base_runner.TEST_MAP['default'].remove('intl')
# TODO(machenbach): uncomment after infra side lands.
# base_runner.TEST_MAP['d8_default'].remove('intl')
if options.time and not options.json_test_results:
if self.options.time and not self.options.json_test_results:
# We retrieve the slowest tests from the JSON output file, so create
# a temporary output file (which will automatically get deleted on exit)
# if the user didn't specify one.
self._temporary_json_output_file = tempfile.NamedTemporaryFile(
prefix="v8-test-runner-")
options.json_test_results = self._temporary_json_output_file.name
self.options.json_test_results = self._temporary_json_output_file.name
def _runner_flags(self):
return EXTRA_DEFAULT_FLAGS
......@@ -247,7 +254,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
print(' Available variants: %s' % ALL_VARIANTS)
print(' Available variant aliases: %s' % VARIANT_ALIASES.keys());
raise base_runner.TestRunnerError()
assert False, 'Unreachable'
assert False, 'Unreachable' # pragma: no cover
def _setup_env(self):
super(StandardTestRunner, self)._setup_env()
......@@ -262,29 +269,29 @@ class StandardTestRunner(base_runner.BaseTestRunner):
'allow_user_segv_handler=1',
])
def _get_statusfile_variables(self, options):
def _get_statusfile_variables(self):
variables = (
super(StandardTestRunner, self)._get_statusfile_variables(options))
super(StandardTestRunner, self)._get_statusfile_variables())
variables.update({
'gc_stress': options.gc_stress or options.random_gc_stress,
'gc_fuzzer': options.random_gc_stress,
'novfp3': options.novfp3,
'gc_stress': self.options.gc_stress or self.options.random_gc_stress,
'gc_fuzzer': self.options.random_gc_stress,
'novfp3': self.options.novfp3,
})
return variables
def _create_sequence_proc(self, options):
def _create_sequence_proc(self):
"""Create processor for sequencing heavy tests on swarming."""
return SequenceProc(options.max_heavy_tests) if options.swarming else None
return SequenceProc(self.options.max_heavy_tests) if self.options.swarming else None
def _do_execute(self, tests, args, options):
jobs = options.j
def _do_execute(self, tests, args):
jobs = self.options.j
print('>>> Running with test processors')
loader = LoadProc(tests)
results = self._create_result_tracker(options)
loader = LoadProc(tests, initial_batch_size=self.options.j * 2)
results = ResultsTracker.create(self.options)
indicators = self._create_progress_indicators(
tests.test_count_estimate, options)
tests.test_count_estimate)
outproc_factory = None
if self.build_config.predictable:
......@@ -295,24 +302,23 @@ class StandardTestRunner(base_runner.BaseTestRunner):
procs = [
loader,
NameFilterProc(args) if args else None,
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
StatusFileFilterProc(self.options.slow_tests, self.options.pass_fail_tests),
VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
StatusFileFilterProc(self.options.slow_tests, self.options.pass_fail_tests),
self._create_predictable_filter(),
self._create_shard_proc(options),
self._create_seed_proc(options),
self._create_sequence_proc(options),
ShardProc.create(self.options),
self._create_seed_proc(),
self._create_sequence_proc(),
sigproc,
] + indicators + [
results,
self._create_timeout_proc(options),
self._create_rerun_proc(options),
TimeoutProc.create(self.options),
RerunProc.create(self.options),
execproc,
]
self._prepare_procs(procs)
loader.load_initial_tests(initial_batch_size=options.j * 2)
loader.load_initial_tests()
# This starts up worker processes and blocks until all tests are
# processed.
......@@ -338,13 +344,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if not results.total:
exit_code = utils.EXIT_CODE_NO_TESTS
if options.time:
self._print_durations(options)
if self.options.time:
self._print_durations()
# Indicate if a SIGINT or SIGTERM happened.
return max(exit_code, sigproc.exit_code)
def _print_durations(self, options):
def _print_durations(self):
def format_duration(duration_in_seconds):
duration = datetime.timedelta(seconds=duration_in_seconds)
......@@ -359,8 +365,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
'Duration: %s' % format_duration(test['duration']),
]
assert os.path.exists(options.json_test_results)
with open(options.json_test_results, "r") as f:
assert os.path.exists(self.options.json_test_results)
with open(self.options.json_test_results, "r") as f:
output = json.load(f)
lines = []
for test in output['slowest_tests']:
......@@ -382,12 +388,12 @@ class StandardTestRunner(base_runner.BaseTestRunner):
return None
return predictable.PredictableFilterProc()
def _create_seed_proc(self, options):
if options.random_seed_stress_count == 1:
def _create_seed_proc(self):
if self.options.random_seed_stress_count == 1:
return None
return SeedProc(options.random_seed_stress_count, options.random_seed,
options.j * 4)
return SeedProc(self.options.random_seed_stress_count, self.options.random_seed,
self.options.j * 4)
if __name__ == '__main__':
sys.exit(StandardTestRunner().execute())
sys.exit(StandardTestRunner().execute()) # pragma: no cover
......@@ -17,203 +17,59 @@ with different test suite extensions and build configurations.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
import collections
import contextlib
import json
import os
import shutil
import sys
import tempfile
import unittest
from os.path import dirname as up
from io import StringIO
TOOLS_ROOT = os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))
TOOLS_ROOT = up(up(os.path.abspath(__file__)))
sys.path.append(TOOLS_ROOT)
from testrunner import standard_runner
from testrunner import num_fuzzer
from testrunner.local import command
from testrunner.local import pool
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'testrunner', 'testdata')
Result = collections.namedtuple(
'Result', ['stdout', 'stderr', 'returncode', 'json'])
Result.__str__ = lambda self: (
'\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
(self.returncode, self.stdout, self.stderr))
@contextlib.contextmanager
def temp_dir():
"""Wrapper making a temporary directory available."""
path = None
try:
path = tempfile.mkdtemp('v8_test_')
yield path
finally:
if path:
shutil.rmtree(path)
@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
"""Wrapper that sets up a temporary V8 test root.
Args:
baseroot: The folder with the test root blueprint. Relevant files will be
copied to the temporary test root, to guarantee a fresh setup with no
dirty state.
"""
basedir = os.path.join(TEST_DATA_ROOT, baseroot)
with temp_dir() as tempbase:
if not os.path.exists(basedir):
yield tempbase
return
builddir = os.path.join(tempbase, 'out', 'build')
testroot = os.path.join(tempbase, 'test')
os.makedirs(builddir)
shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)
for suite in os.listdir(os.path.join(basedir, 'test')):
os.makedirs(os.path.join(testroot, suite))
for entry in os.listdir(os.path.join(basedir, 'test', suite)):
shutil.copy(
os.path.join(basedir, 'test', suite, entry),
os.path.join(testroot, suite))
yield tempbase
@contextlib.contextmanager
def capture():
"""Wrapper that replaces system stdout/stderr an provides the streams."""
oldout = sys.stdout
olderr = sys.stderr
try:
stdout=StringIO()
stderr=StringIO()
sys.stdout = stdout
sys.stderr = stderr
yield stdout, stderr
finally:
sys.stdout = oldout
sys.stderr = olderr
def run_tests(*args, baseroot='testroot1', config_overrides={}, **kwargs):
"""Executes the test runner with captured output."""
with temp_base(baseroot=baseroot) as basedir:
override_build_config(basedir, **config_overrides)
json_out_path = None
def resolve_arg(arg):
"""Some arguments come as function objects to be called (resolved)
in the context of a temporary test configuration"""
nonlocal json_out_path
if arg == with_json_output:
json_out_path = with_json_output(basedir)
return json_out_path
return arg
resolved_args = [resolve_arg(arg) for arg in args]
with capture() as (stdout, stderr):
sys_args = ['--command-prefix', sys.executable] + resolved_args
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
json_out = clean_json_output(json_out_path, basedir)
return Result(stdout.getvalue(), stderr.getvalue(), code, json_out)
def with_json_output(basedir):
""" Function used as a placeholder where we need to resolve the value in the
context of a temporary test configuration"""
return os.path.join(basedir, 'out.json')
def clean_json_output(json_path, basedir):
# Extract relevant properties of the json output.
if not json_path:
return None
with open(json_path) as f:
json_output = json.load(f)
# Replace duration in actual output as it's non-deterministic. Also
# replace the python executable prefix as it has a different absolute
# path dependent on where this runs.
def replace_variable_data(data):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
data['command'] = data['command'].replace(basedir + '/', '')
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
replace_variable_data(data)
json_output['duration_mean'] = 1
# We need lexicographic sorting here to avoid non-deterministic behaviour
# The original sorting key is duration, but in our fake test we have
# non-deterministic durations before we reset them to 1
def sort_key(x):
return str(sorted(x.items()))
json_output['slowest_tests'].sort(key=sort_key)
return json_output
def override_build_config(basedir, **kwargs):
"""Override the build config with new values provided as kwargs."""
if not kwargs:
return
path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json')
with open(path) as f:
config = json.load(f)
config.update(kwargs)
with open(path, 'w') as f:
json.dump(config, f)
class SystemTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
command.setup_testing()
pool.setup_testing()
from testrunner.utils.test_utils import (
temp_base,
TestRunnerTest,
with_json_output,
)
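# The helpers imported above come from the new testrunner/utils/test_utils.py,
# which is not part of this hunk. A rough sketch of the API, inferred from the
# call sites below (names are real, bodies are assumed):
#
#   class TestRunnerTest(unittest.TestCase):
#     def run_tests(self, *args, baseroot='testroot1', config_overrides={},
#                   **kwargs):
#       # Builds a temporary test root, runs get_runner_class()().execute()
#       # with captured output, and returns a result object offering helpers
#       # such as stdout_includes(), stdout_excludes(), stderr_includes(),
#       # has_returncode() and json_content_equals().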
class StandardRunnerTest(TestRunnerTest):
def get_runner_class(self):
return standard_runner.StandardTestRunner
def testPass(self):
"""Test running only passing tests in two variants.
Also test printing durations.
"""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default,stress',
'--time',
'sweet/bananas',
'sweet/raspberries',
)
self.assertIn('sweet/bananas default: PASS', result.stdout, result)
result.stdout_includes('sweet/bananas default: PASS')
# TODO(majeski): Implement for test processors
# self.assertIn('Total time:', result.stderr, result)
# self.assertIn('sweet/bananas', result.stderr, result)
self.assertEqual(0, result.returncode, result)
result.has_returncode(0)
def testPassHeavy(self):
"""Test running with some tests marked heavy."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=nooptimization',
'-j2',
'sweet',
baseroot='testroot3',
)
self.assertIn('7 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
result.stdout_includes('7 tests ran')
result.has_returncode(0)
def testShardedProc(self):
for shard in [1, 2]:
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
......@@ -223,23 +79,23 @@ class SystemTest(unittest.TestCase):
infra_staging=False,
)
# One of the shards gets one variant of each test.
self.assertIn('2 tests ran', result.stdout, result)
result.stdout_includes('2 tests ran')
if shard == 1:
self.assertIn('sweet/raspberries default', result.stdout, result)
self.assertIn('sweet/raspberries stress', result.stdout, result)
self.assertEqual(0, result.returncode, result)
result.stdout_includes('sweet/raspberries default')
result.stdout_includes('sweet/raspberries stress')
result.has_returncode(0)
else:
self.assertIn(
'sweet/blackberries default: FAIL', result.stdout, result)
self.assertIn(
'sweet/blackberries stress: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
result.stdout_includes(
'sweet/blackberries default: FAIL')
result.stdout_includes(
'sweet/blackberries stress: FAIL')
result.has_returncode(1)
@unittest.skip("incompatible with test processors")
def testSharded(self):
"""Test running a particular shard."""
for shard in [1, 2]:
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
......@@ -248,35 +104,41 @@ class SystemTest(unittest.TestCase):
'sweet/raspberries',
)
# One of the shards gets one variant of each test.
self.assertIn('Running 2 tests', result.stdout, result)
self.assertIn('sweet/bananas', result.stdout, result)
self.assertIn('sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
result.stdout_includes('Running 2 tests')
result.stdout_includes('sweet/bananas')
result.stdout_includes('sweet/raspberries')
result.has_returncode(0)
def testFail(self):
"""Test running only failing tests in two variants."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default,stress',
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
result.stdout_includes('sweet/strawberries default: FAIL')
result.has_returncode(1)
def testGN(self):
"""Test running only failing tests in two variants."""
result = self.run_tests('--gn',baseroot="testroot5")
result.stdout_includes('>>> Latest GN build found: build')
result.stdout_includes('Build found: ')
result.stdout_includes('v8_test_/out.gn/build')
result.has_returncode(2)
def check_cleaned_json_output(
self, expected_results_name, actual_json_out, basedir=None):
with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
expected_test_results = json.load(f)
pretty_json = json.dumps(actual_json_out, indent=2, sort_keys=True)
msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(actual_json_out, expected_test_results, msg)
def testMalformedJsonConfig(self):
"""Test running only failing tests in two variants."""
result = self.run_tests(baseroot="testroot4")
result.stdout_includes('contains invalid json')
result.stdout_includes('Failed to load build config')
result.stderr_includes('testrunner.base_runner.TestRunnerError')
result.has_returncode(5)
def testFailWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
......@@ -285,22 +147,21 @@ class SystemTest(unittest.TestCase):
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
result.stdout_includes('sweet/strawberries default: FAIL')
# With test processors we don't count reruns as separated failures.
# TODO(majeski): fix it?
self.assertIn('1 tests failed', result.stdout, result)
self.assertEqual(0, result.returncode, result)
result.stdout_includes('1 tests failed')
result.has_returncode(0)
# TODO(majeski): Previously we only reported the variant flags in the
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
self.check_cleaned_json_output(
'expected_test_results1.json', result.json)
result.json_content_equals('expected_test_results1.json')
def testFlakeWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
......@@ -310,12 +171,12 @@ class SystemTest(unittest.TestCase):
baseroot='testroot2',
infra_staging=False,
)
self.assertIn('sweet/bananaflakes default: FAIL PASS', result.stdout, result)
self.assertIn('=== sweet/bananaflakes (flaky) ===', result.stdout, result)
self.assertIn('1 tests failed', result.stdout, result)
self.assertIn('1 tests were flaky', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.check_cleaned_json_output('expected_test_results2.json', result.json)
result.stdout_includes('sweet/bananaflakes default: FAIL PASS')
result.stdout_includes('=== sweet/bananaflakes (flaky) ===')
result.stdout_includes('1 tests failed')
result.stdout_includes('1 tests were flaky')
result.has_returncode(0)
result.json_content_equals('expected_test_results2.json')
def testAutoDetect(self):
"""Fake a build with several auto-detected options.
......@@ -323,7 +184,7 @@ class SystemTest(unittest.TestCase):
Using all those options at once doesn't really make much sense. This is
merely for getting coverage.
"""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'sweet/bananas',
......@@ -349,68 +210,67 @@ class SystemTest(unittest.TestCase):
'ubsan_vptr\n'
'webassembly\n'
'>>> Running tests for ia32.release')
self.assertIn(expect_text, result.stdout, result)
self.assertEqual(0, result.returncode, result)
result.stdout_includes(expect_text)
result.has_returncode(0)
# TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set.
def testSkips(self):
"""Test skipping tests in status file for a specific variant."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=nooptimization',
'sweet/strawberries',
infra_staging=False,
)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
result.stdout_includes('0 tests ran')
result.has_returncode(2)
def testRunSkips(self):
"""Inverse the above. Test parameter to keep running skipped tests."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=nooptimization',
'--run-skipped',
'sweet/strawberries',
)
self.assertIn('1 tests failed', result.stdout, result)
self.assertIn('1 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result)
result.stdout_includes('1 tests failed')
result.stdout_includes('1 tests ran')
result.has_returncode(1)
def testDefault(self):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
result = run_tests(
result = self.run_tests(
infra_staging=False,
)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
result.stdout_includes('0 tests ran')
result.has_returncode(2)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
result = run_tests(baseroot='wrong_path')
self.assertIn('Failed to load build config', result.stdout, result)
self.assertEqual(5, result.returncode, result)
result = self.run_tests(baseroot='wrong_path')
result.stdout_includes('Failed to load build config')
result.has_returncode(5)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
result = run_tests('--arch=ia32')
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
self.assertEqual(5, result.returncode, result)
result = self.run_tests('--arch=ia32')
result.stdout_includes(
'--arch value (ia32) inconsistent with build config (x64).')
result.has_returncode(5)
def testWrongVariant(self):
"""Test using a bogus variant."""
result = run_tests('--variants=meh')
self.assertEqual(5, result.returncode, result)
result = self.run_tests('--variants=meh')
result.has_returncode(5)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
result = run_tests('--outdir=out/build', 'sweet/bananas')
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
result = self.run_tests('--outdir=out/build', 'sweet/bananas')
result.stdout_includes('Running tests for x64.release')
result.has_returncode(0)
def testPredictable(self):
"""Test running a test in verify-predictable mode.
......@@ -418,22 +278,22 @@ class SystemTest(unittest.TestCase):
The test will fail because of missing allocation output. We verify that and
that the predictable flags are passed and printed after failure.
"""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'sweet/bananas',
infra_staging=False,
config_overrides=dict(v8_enable_verify_predictable=True),
)
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('sweet/bananas default: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify-predictable', result.stdout, result)
self.assertEqual(1, result.returncode, result)
result.stdout_includes('1 tests ran')
result.stdout_includes('sweet/bananas default: FAIL')
result.stdout_includes('Test had no allocation output')
result.stdout_includes('--predictable --verify-predictable')
result.has_returncode(1)
def testSlowArch(self):
"""Test timeout factor manipulation on slow architecture."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'sweet/bananas',
......@@ -441,34 +301,34 @@ class SystemTest(unittest.TestCase):
)
# TODO(machenbach): We don't have a way for testing if the correct
# timeout was used.
self.assertEqual(0, result.returncode, result)
result.has_returncode(0)
def testRandomSeedStressWithDefault(self):
"""Test using random-seed-stress feature has the right number of tests."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
result.stdout_includes('2 tests ran')
result.has_returncode(0)
def testRandomSeedStressWithSeed(self):
"""Test using random-seed-stress feature passing a random seed."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'--random-seed=123',
'sweet/strawberries',
)
self.assertIn('2 tests ran', result.stdout, result)
result.stdout_includes('2 tests ran')
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
self.assertIn('--random-seed=123', result.stdout, result)
self.assertEqual(1, result.returncode, result)
result.stdout_includes('--random-seed=123')
result.has_returncode(1)
def testSpecificVariants(self):
"""Test using NO_VARIANTS modifiers in status files skips the desire tests.
......@@ -477,7 +337,7 @@ class SystemTest(unittest.TestCase):
But the status file applies a modifier to each skipping one of the
variants.
"""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default,stress',
'sweet/bananas',
......@@ -486,27 +346,20 @@ class SystemTest(unittest.TestCase):
)
# Both tests are either marked as running in only default or only
# slow variant.
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
"""Test that the fake status file is well-formed."""
with temp_base() as basedir:
from testrunner.local import statusfile
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
result.stdout_includes('2 tests ran')
result.has_returncode(0)
def testDotsProgress(self):
result = run_tests(
result = self.run_tests(
'--progress=dots',
'sweet/cherries',
'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic
infra_staging=False,
)
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result)
result.stdout_includes('2 tests ran')
result.stdout_includes('F.')
result.has_returncode(1)
def testMonoProgress(self):
self._testCompactProgress('mono')
......@@ -515,7 +368,7 @@ class SystemTest(unittest.TestCase):
self._testCompactProgress('color')
def _testCompactProgress(self, name):
result = run_tests(
result = self.run_tests(
'--progress=%s' % name,
'sweet/cherries',
'sweet/bananas',
......@@ -527,13 +380,13 @@ class SystemTest(unittest.TestCase):
'\033[31m- 1\033[0m]: Done')
else:
expected = '% 28|+ 1|- 1]: Done'
self.assertIn(expected, result.stdout)
self.assertIn('sweet/cherries', result.stdout)
self.assertIn('sweet/bananas', result.stdout)
self.assertEqual(1, result.returncode, result)
result.stdout_includes(expected)
result.stdout_includes('sweet/cherries')
result.stdout_includes('sweet/bananas')
result.has_returncode(1)
def testExitAfterNFailures(self):
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--exit-after-n-failures=2',
'-j1',
......@@ -542,28 +395,18 @@ class SystemTest(unittest.TestCase):
'sweet/blackberries', # FAIL
'sweet/raspberries', # should not run
)
self.assertIn('sweet/mangoes default: PASS', result.stdout, result)
self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
self.assertIn('Too many failures, exiting...', result.stdout, result)
self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
self.assertNotIn('sweet/raspberries', result.stdout, result)
self.assertIn('2 tests failed', result.stdout, result)
self.assertIn('3 tests ran', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testNumFuzzer(self):
sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/build']
with temp_base() as basedir:
with capture() as (stdout, stderr):
code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
result = Result(stdout.getvalue(), stderr.getvalue(), code, None)
self.assertEqual(0, result.returncode, result)
result.stdout_includes('sweet/mangoes default: PASS')
result.stdout_includes('sweet/strawberries default: FAIL')
result.stdout_includes('Too many failures, exiting...')
result.stdout_includes('sweet/blackberries default: FAIL')
result.stdout_excludes('sweet/raspberries')
result.stdout_includes('2 tests failed')
result.stdout_includes('3 tests ran')
result.has_returncode(1)
def testRunnerFlags(self):
"""Test that runner-specific flags are passed to tests."""
result = run_tests(
result = self.run_tests(
'--progress=verbose',
'--variants=default',
'--random-seed=42',
......@@ -571,11 +414,30 @@ class SystemTest(unittest.TestCase):
'-v',
)
self.assertIn(
'--test bananas --random-seed=42 --nohard-abort --testing-d8-test-runner',
result.stdout, result)
self.assertEqual(0, result.returncode, result)
result.stdout_includes(
'--test bananas --random-seed=42 --nohard-abort --testing-d8-test-runner')
result.has_returncode(0)
class NumFuzzerTest(TestRunnerTest):
def get_runner_class(self):
return num_fuzzer.NumFuzzer
def testNumFuzzer(self):
result = self.run_tests(
'--command-prefix', sys.executable,
'--outdir', 'out/build',
)
result.has_returncode(0)
result.stdout_includes('>>> Autodetected')
class OtherTest(TestRunnerTest):
def testStatusFilePresubmit(self):
"""Test that the fake status file is well-formed."""
with temp_base() as basedir:
from testrunner.local import statusfile
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
if __name__ == '__main__':
unittest.main()
{
"current_cpu": "x64",
"dcheck_always_on": false,
"is_android": false,
"is_asan": false,
"is_cfi": false,
"is_clang": true,
"is_component_build": false,
"is_debug": false,
"is_full_debug": false,
"is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
"is_tsan": false,
"target_cpu": "x64",
"v8_current_cpu": "x64",
"v8_enable_i18n_support": true,
"v8_enable_verify_predictable": false,
"v8_target_cpu": "x64",
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
"v8_enable_sandbox": false,
"v8_enable_shared_ro_heap": true,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
"v8_enable_webassembly": true,
"v8_dict_property_const_tracking": false
}
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
[
[ALWAYS, {
'raspberries': FAIL,
'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],
'mangoes': [PASS, SLOW],
# Both cherries and apples are to test how PASS and FAIL from different
# sections are merged.
'cherries': [PASS, SLOW],
'apples': [FAIL],
# Unused rule.
'carrots': [PASS, FAIL],
}],
['variant == nooptimization', {
'strawberries': [SKIP],
}],
['arch == x64', {
'cherries': [FAIL],
'apples': [PASS, SLOW],
# Unused rule.
'regress/*': [CRASH],
}],
['asan', {
'bananas': [PASS, NO_VARIANTS],
'raspberries': [FAIL, NO_VARIANTS],
}],
]
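# For illustration, assuming the usual status-file semantics the comments
# above refer to: when several sections match, their outcome lists are merged,
# so on an x64 build 'cherries' would carry PASS, SLOW and FAIL together.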
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Dummy test suite extension with some fruity tests.
"""
from testrunner.local import testsuite
from testrunner.objects import testcase
class TestLoader(testsuite.TestLoader):
def _list_test_filenames(self):
return [
'bananas', 'apples', 'cherries', 'mangoes', 'strawberries',
'blackberries', 'raspberries',
]
class TestSuite(testsuite.TestSuite):
def _test_loader_class(self):
return TestLoader
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def get_shell(self):
return 'd8_mocked.py'
def _get_files_params(self):
return [self.name]
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
......@@ -7,6 +7,13 @@ from collections import defaultdict
from . import base
class CombinerProc(base.TestProc):
@staticmethod
def create(options):
if not options.combine_tests:
return None
return CombinerProc(options.fuzzer_rng(), options.combine_min, options.combine_max,
options.tests_count)
def __init__(self, rng, min_group_size, max_group_size, count):
"""
Args:
......
......@@ -138,6 +138,15 @@ class Fuzzer(object):
# TODO(majeski): Allow multiple subtests to run at once.
class FuzzerProc(base.TestProcProducer):
@staticmethod
def create(options):
return FuzzerProc(
options.fuzzer_rng(),
options.fuzzer_tests_count(),
options.fuzzer_configs(),
options.combine_tests,
)
def __init__(self, rng, count, fuzzers, disable_analysis=False):
"""
Args:
......
......@@ -4,24 +4,25 @@
from . import base
class LoadProc(base.TestProc):
"""First processor in the chain that passes all tests to the next processor.
"""
def __init__(self, tests):
def __init__(self, tests, initial_batch_size=float('inf')):
super(LoadProc, self).__init__()
self.tests = tests
self.initial_batch_size = initial_batch_size
def load_initial_tests(self, initial_batch_size):
def load_initial_tests(self):
"""
Args:
exec_proc: execution processor that the tests are being loaded into
initial_batch_size: initial number of tests to load
"""
loaded_tests = 0
while loaded_tests < initial_batch_size:
while loaded_tests < self.initial_batch_size:
try:
t = next(self.tests)
except StopIteration:
......
......@@ -24,6 +24,10 @@ def print_failure_header(test, is_flaky=False):
class ResultsTracker(base.TestProcObserver):
@staticmethod
def create(options):
return ResultsTracker(options.exit_after_n_failures)
"""Tracks number of results and stops to run tests if max_failures reached."""
def __init__(self, max_failures):
super(ResultsTracker, self).__init__()
......@@ -58,6 +62,10 @@ class ProgressIndicator(base.TestProcObserver):
def configure(self, options):
self.options = options
def set_test_count(self, test_count):
self._total = test_count
class SimpleProgressIndicator(ProgressIndicator):
def __init__(self):
......@@ -246,9 +254,6 @@ class CompactProgressIndicator(ProgressIndicator):
self._passed = 0
self._failed = 0
def set_test_count(self, test_count):
self._total = test_count
def _on_result_for(self, test, result):
# TODO(majeski): Support for dummy/grouped results
if result.has_unexpected_output:
......
......@@ -9,6 +9,13 @@ from .result import RerunResult
class RerunProc(base.TestProcProducer):
@staticmethod
def create(options):
if not options.rerun_failures_count:
return None
return RerunProc(options.rerun_failures_count,
options.rerun_failures_max)
def __init__(self, rerun_max, rerun_max_total=None):
super(RerunProc, self).__init__('Rerun')
self._requirement = base.DROP_OUTPUT
......
......@@ -64,7 +64,7 @@ class TestSequenceProc(unittest.TestCase):
def _test(self, tests, batch_size, max_heavy):
# Set up a simple processing pipeline:
# Loader -> observe results -> sequencer -> execution.
loader = LoadProc(iter(tests))
loader = LoadProc(iter(tests), initial_batch_size=batch_size)
results = FakeResultObserver()
sequence_proc = SequenceProc(max_heavy)
execution = FakeExecutionProc()
......@@ -74,7 +74,7 @@ class TestSequenceProc(unittest.TestCase):
# Fill the execution queue (with the number of tests potentially
# executed in parallel).
loader.load_initial_tests(batch_size)
loader.load_initial_tests()
# Simulate the execution test by test.
while execution.tests:
......
......@@ -18,6 +18,13 @@ def radix_hash(capacity, key):
class ShardProc(base.TestProcFilter):
@staticmethod
def create(options):
myid, count = options.shard_info()
if count == 1:
return None
return ShardProc(myid, count)
"""Processor distributing tests between shards.
It hashes the unique test identifiers and uses the hash to shard tests.
"""
......
......@@ -8,6 +8,12 @@ from . import base
class TimeoutProc(base.TestProcObserver):
@staticmethod
def create(options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
def __init__(self, duration_sec):
super(TimeoutProc, self).__init__()
self._duration_sec = duration_sec
......
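The create(options) factories added above share one pattern: they return None when their feature is disabled, so the runner can simply drop inactive processors. A minimal wiring sketch, assuming an assembly helper of this shape exists in the runner (the helper name is made up; the proc classes and option names are the ones from the hunks above):

from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.timeout import TimeoutProc

def optional_procs(options):
  # Each factory returns None when its feature is off; keep only active procs.
  candidates = [
      ShardProc.create(options),
      RerunProc.create(options),
      TimeoutProc.create(options),
  ]
  return [proc for proc in candidates if proc]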
# Copyright 2022 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import random
from testrunner.testproc import fuzzer
class AugmentedOptions(optparse.Values):
"""This class will augment exiting options object with
a couple of convenient methods and properties.
"""
@staticmethod
def augment(options_object):
options_object.__class__ = AugmentedOptions
return options_object
def fuzzer_rng(self):
if not getattr(self, '_fuzzer_rng', None):
self._fuzzer_rng = random.Random(self.fuzzer_random_seed)
return self._fuzzer_rng
def shard_info(self):
"""
Returns pair:
(id of the current shard [1; number of shards], number of shards)
"""
# Read gtest shard configuration from environment (e.g. set by swarming).
# If none is present, use values passed on the command line.
count = int(
os.environ.get('GTEST_TOTAL_SHARDS', self.shard_count))
run = os.environ.get('GTEST_SHARD_INDEX')
# The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
run = int(run) + 1 if run else self.shard_run
if self.shard_count > 1:
# Log if a value was passed on the cmd line and it differs from the
# environment variables.
if self.shard_count != count: # pragma: no cover
print("shard_count from cmd line differs from environment variable "
"GTEST_TOTAL_SHARDS")
if (self.shard_run > 1 and
self.shard_run != run): # pragma: no cover
print("shard_run from cmd line differs from environment variable "
"GTEST_SHARD_INDEX")
if run < 1 or run > count:
# TODO(machenbach): Turn this into an assert. If that's wrong on the
# bots, printing will be quite useless. Or refactor this code to make
# sure we get a return code != 0 after testing if we got here.
print("shard-run not a valid number, should be in [1:shard-count]")
print("defaulting back to running all tests")
return 0, 1
return run - 1, count # coming back to 0 based counting
def fuzzer_configs(self):
fuzzers = []
def add(name, prob, *args):
if prob:
fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
add('compaction', self.stress_compaction)
add('marking', self.stress_marking)
add('scavenge', self.stress_scavenge)
add('gc_interval', self.stress_gc)
add('stack', self.stress_stack_size)
add('threads', self.stress_thread_pool_size)
add('delay', self.stress_delay_tasks)
add('deopt', self.stress_deopt, self.stress_deopt_min)
return fuzzers
def fuzzer_tests_count(self):
if self.combine_tests:
return 1
return self.tests_count
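A usage sketch for AugmentedOptions, with a hypothetical optparse setup; only option names the class actually reads are defined here:

import optparse

parser = optparse.OptionParser()
parser.add_option('--fuzzer-random-seed', type=int, default=42)
parser.add_option('--shard-count', type=int, default=1)
parser.add_option('--shard-run', type=int, default=1)
options, _ = parser.parse_args([])

options = AugmentedOptions.augment(options)   # rebinds the class in place
rng = options.fuzzer_rng()                    # cached random.Random seeded with 42
shard_index, shard_count = options.shard_info()  # (0, 1) without GTEST_* variables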
# Copyright 2022 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import os
import shutil
import sys
import tempfile
import unittest
from dataclasses import dataclass
from io import StringIO
from os.path import dirname as up
TOOLS_ROOT = up(up(up(os.path.abspath(__file__))))
sys.path.append(TOOLS_ROOT)
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'testrunner', 'testdata')
from testrunner.local import command
from testrunner.local import pool
@contextlib.contextmanager
def temp_dir():
"""Wrapper making a temporary directory available."""
path = None
try:
path = tempfile.mkdtemp('v8_test_')
yield path
finally:
if path:
shutil.rmtree(path)
@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
"""Wrapper that sets up a temporary V8 test root.
Args:
baseroot: The folder with the test root blueprint. All files will be
copied to the temporary test root, to guarantee a fresh setup with no
dirty state.
"""
basedir = os.path.join(TEST_DATA_ROOT, baseroot)
with temp_dir() as tempbase:
if os.path.exists(basedir):
shutil.copytree(basedir, tempbase, dirs_exist_ok=True)
yield tempbase
@contextlib.contextmanager
def capture():
"""Wrapper that replaces system stdout/stderr an provides the streams."""
oldout = sys.stdout
olderr = sys.stderr
try:
stdout=StringIO()
stderr=StringIO()
sys.stdout = stdout
sys.stderr = stderr
yield stdout, stderr
finally:
sys.stdout = oldout
sys.stderr = olderr
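For illustration, the two context managers above compose as follows; the temporary root is removed and the real streams are restored even if the body raises:

def example_captured_run():
  with temp_base() as basedir:          # fresh copy of the blueprint test root
    with capture() as (stdout, stderr):
      print('running against', basedir)
    return stdout.getvalue()            # captured text, real stdout is back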
def with_json_output(basedir):
""" Function used as a placeholder where we need to resolve the value in the
context of a temporary test configuration"""
return os.path.join(basedir, 'out.json')
def clean_json_output(json_path, basedir):
# Extract relevant properties of the json output.
if not json_path:
return None
with open(json_path) as f:
json_output = json.load(f)
# Replace duration in actual output as it's non-deterministic. Also
# replace the python executable prefix as it has a different absolute
# path dependent on where this runs.
def replace_variable_data(data):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
data['command'] = data['command'].replace(basedir + '/', '')
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
replace_variable_data(data)
json_output['duration_mean'] = 1
# We need lexicographic sorting here to avoid non-deterministic behaviour.
# The original sorting key is duration, but in our fake test we have
# non-deterministic durations before we reset them to 1.
def sort_key(x):
return str(sorted(x.items()))
json_output['slowest_tests'].sort(key=sort_key)
return json_output
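An example of the normalization above, using a made-up record and basedir:

# With basedir == '/tmp/base', replace_variable_data() turns
#   {'duration': 0.42, 'command': '/usr/local/bin/python3 /tmp/base/out/build/d8 --test foo'}
# into
#   {'duration': 1, 'command': '/usr/bin/python out/build/d8 --test foo'}
# so expectation files stay stable across machines and runs.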
def override_build_config(basedir, **kwargs):
"""Override the build config with new values provided as kwargs."""
if not kwargs:
return
path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json')
with open(path) as f:
config = json.load(f)
config.update(kwargs)
with open(path, 'w') as f:
json.dump(config, f)
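A usage sketch; the keys come from the v8_build_config.json shown earlier in this change:

with temp_base() as basedir:
  # Pretend this build had ASAN and debug enabled for one test scenario.
  override_build_config(basedir, is_asan=True, is_debug=True)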
@dataclass
class TestResult():
stdout: str
stderr: str
returncode: int
json: str
current_test_case: unittest.TestCase
def __str__(self):
return f'\nReturncode: {self.returncode}\nStdout:\n{self.stdout}\nStderr:\n{self.stderr}\n'
def has_returncode(self, code):
self.current_test_case.assertEqual(code, self.returncode, self)
def stdout_includes(self, content):
self.current_test_case.assertIn(content, self.stdout, self)
def stdout_excludes(self, content):
self.current_test_case.assertNotIn(content, self.stdout, self)
def stderr_includes(self, content):
self.current_test_case.assertIn(content, self.stderr, self)
def stderr_excludes(self, content):
self.current_test_case.assertNotIn(content, self.stderr, self)
def json_content_equals(self, expected_results_file):
with open(os.path.join(TEST_DATA_ROOT, expected_results_file)) as f:
expected_test_results = json.load(f)
pretty_json = json.dumps(self.json, indent=2, sort_keys=True)
msg = None # Set to pretty_json for bootstrapping.
self.current_test_case.assertDictEqual(self.json, expected_test_results, msg)
class TestRunnerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
command.setup_testing()
pool.setup_testing()
def run_tests(self, *args, baseroot='testroot1', config_overrides={}, **kwargs):
"""Executes the test runner with captured output."""
with temp_base(baseroot=baseroot) as basedir:
override_build_config(basedir, **config_overrides)
json_out_path = None
def resolve_arg(arg):
"""Some arguments come as function objects to be called (resolved)
in the context of a temporary test configuration."""
nonlocal json_out_path
if arg == with_json_output:
json_out_path = with_json_output(basedir)
return json_out_path
return arg
resolved_args = [resolve_arg(arg) for arg in args]
with capture() as (stdout, stderr):
sys_args = ['--command-prefix', sys.executable] + resolved_args
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
code = self.get_runner_class()(basedir=basedir).execute(sys_args)
json_out = clean_json_output(json_out_path, basedir)
return TestResult(stdout.getvalue(), stderr.getvalue(), code, json_out, self)
def get_runner_class(self):
"""Implement in subclasses to return the runner class."""
return None
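For illustration, a hypothetical subclass showing how a concrete runner plugs into the harness above (NumFuzzerTest in this change follows the same shape; num_fuzzer would need to be imported as in run_tests_test.py):

class ExampleRunnerTest(TestRunnerTest):
  def get_runner_class(self):
    return num_fuzzer.NumFuzzer         # any runner exposing execute(sys_args)

  def testSmoke(self):
    result = self.run_tests('--outdir', 'out/build')
    result.has_returncode(0)
    result.stdout_includes('>>> Autodetected')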