Commit 9733dacc authored by Liviu Rau, committed by V8 LUCI CQ

[test] Refactor testrunner (3)

- removed some 'feature envy' instances:
  - created an AugmentedOptions class to take care of non-trivial option look-ups
  - moved some calculations closer to the actual data
- promoted a parameter that was passed around a lot (options) to a field in the offending class
- made similar object creations look more uniform
- CommandContext provides a wrapper that ensures resource de-allocation (usage sketched below)
- separated the tests for the standard and num_fuzz runners
- added a couple more tests to improve coverage

This is still in flux, but further changes risk creating a disconnect between the original implementation and subsequent refactorings.
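
A minimal usage sketch of how the new helpers are meant to be combined (the run_fuzz_session wrapper and the AugmentedOptions module path below are assumptions for illustration; only command_context and AugmentedOptions.augment appear in this change):

  # Illustrative sketch only, not part of this change.
  from testrunner.local.command import command_context
  from testrunner.utils.augmented_options import AugmentedOptions  # assumed module path

  def run_fuzz_session(target_os, device, options, tests):
    # Wrap the plain optparse options so non-trivial look-ups such as
    # fuzzer_rng(), shard_info() and fuzzer_configs() live next to the data.
    options = AugmentedOptions.augment(options)

    # command_context picks the OS-specific Command class and guarantees
    # tear-down (e.g. of the Android driver) even when the block raises.
    with command_context(target_os, device):
      for test in tests:
        pass  # build and run the command for each test here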

Bug: v8:12785
Change-Id: I0ec2e9c6a81604a5cd1d4a80982dd3329c1b48db
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3686411
Commit-Queue: Liviu Rau <liviurau@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81148}
parent 28fe6582
@@ -327,6 +327,40 @@ class AndroidCommand(BaseCommand):
 Command = None

+class CommandContext():
+  def __init__(self, command):
+    self.command = command
+
+  @contextmanager
+  def context(self, device):
+    yield
+
+class AndroidContext():
+  def __init__(self):
+    self.command = AndroidCommand
+
+  @contextmanager
+  def context(self, device):
+    try:
+      AndroidCommand.driver = android_driver(device)
+      yield
+    finally:
+      AndroidCommand.driver.tear_down()
+
+@contextmanager
+def command_context(target_os, device):
+  factory = dict(
+      android=AndroidContext(),
+      windows=CommandContext(WindowsCommand),
+  )
+  context = factory.get(target_os, CommandContext(PosixCommand))
+  with context.context(device):
+    global Command
+    Command = context.command
+    yield
+
+# Deprecated : use command_context
 def setup(target_os, device):
   """Set the Command class to the OS-specific version."""
   global Command
@@ -338,6 +372,7 @@ def setup(target_os, device):
   else:
     Command = PosixCommand

+# Deprecated : use command_context
 def tear_down():
   """Clean up after using commands."""
   if Command == AndroidCommand:
@@ -4,7 +4,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-import random
 import sys

 # Adds testrunner to the path hence it has to be imported at the beggining.
@@ -19,6 +18,10 @@ from testrunner.testproc.expectation import ExpectationProc
 from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
 from testrunner.testproc.loader import LoadProc
 from testrunner.utils import random_utils
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.timeout import TimeoutProc
+from testrunner.testproc.progress import ResultsTracker
+from testrunner.testproc.shard import ShardProc

 DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
@@ -97,20 +100,20 @@ class NumFuzzer(base_runner.BaseTestRunner):
     return parser

-  def _process_options(self, options):
-    if not options.fuzzer_random_seed:
-      options.fuzzer_random_seed = random_utils.random_seed()
+  def _process_options(self):
+    if not self.options.fuzzer_random_seed:
+      self.options.fuzzer_random_seed = random_utils.random_seed()

-    if options.total_timeout_sec:
-      options.tests_count = 0
+    if self.options.total_timeout_sec:
+      self.options.tests_count = 0

-    if options.combine_tests:
-      if options.combine_min > options.combine_max:
+    if self.options.combine_tests:
+      if self.options.combine_min > self.options.combine_max:
         print(('min_group_size (%d) cannot be larger than max_group_size (%d)' %
-               options.min_group_size, options.max_group_size))
+               self.options.min_group_size, self.options.max_group_size))
         raise base_runner.TestRunnerError()

-    if options.variants != 'default':
+    if self.options.variants != 'default':
       print ('Only default testing variant is supported with numfuzz')
       raise base_runner.TestRunnerError()
@@ -125,56 +128,54 @@ class NumFuzzer(base_runner.BaseTestRunner):
       '--exit-on-contradictory-flags', '--testing-d8-test-runner', '--no-fail'
     ]

-  def _get_statusfile_variables(self, options):
+  def _get_statusfile_variables(self):
     variables = (
-        super(NumFuzzer, self)._get_statusfile_variables(options))
+        super(NumFuzzer, self)._get_statusfile_variables())
     variables.update({
-      'deopt_fuzzer': bool(options.stress_deopt),
-      'endurance_fuzzer': bool(options.combine_tests),
-      'gc_stress': bool(options.stress_gc),
-      'gc_fuzzer': bool(max([options.stress_marking,
-                             options.stress_scavenge,
-                             options.stress_compaction,
-                             options.stress_gc,
-                             options.stress_delay_tasks,
-                             options.stress_stack_size,
-                             options.stress_thread_pool_size])),
+      'deopt_fuzzer': bool(self.options.stress_deopt),
+      'endurance_fuzzer': bool(self.options.combine_tests),
+      'gc_stress': bool(self.options.stress_gc),
+      'gc_fuzzer': bool(max([self.options.stress_marking,
+                             self.options.stress_scavenge,
+                             self.options.stress_compaction,
+                             self.options.stress_gc,
+                             self.options.stress_delay_tasks,
+                             self.options.stress_stack_size,
+                             self.options.stress_thread_pool_size])),
     })
     return variables

-  def _do_execute(self, tests, args, options):
+  def _do_execute(self, tests, args):
     loader = LoadProc(tests)
-    fuzzer_rng = random.Random(options.fuzzer_random_seed)
-    combiner = self._create_combiner(fuzzer_rng, options)
-    results = self._create_result_tracker(options)
-    execproc = ExecutionProc(options.j)
+    combiner = CombinerProc.create(self.options)
+    results = ResultsTracker.create(self.options)
+    execproc = ExecutionProc(self.options.j)
     sigproc = self._create_signal_proc()
     indicators = self._create_progress_indicators(
-      tests.test_count_estimate, options)
+      tests.test_count_estimate)
     procs = [
       loader,
       NameFilterProc(args) if args else None,
       StatusFileFilterProc(None, None),
       # TODO(majeski): Improve sharding when combiner is present. Maybe select
       # different random seeds for shards instead of splitting tests.
-      self._create_shard_proc(options),
+      ShardProc.create(self.options),
       ExpectationProc(),
       combiner,
-      self._create_fuzzer(fuzzer_rng, options),
+      fuzzer.FuzzerProc.create(self.options),
       sigproc,
     ] + indicators + [
       results,
-      self._create_timeout_proc(options),
-      self._create_rerun_proc(options),
+      TimeoutProc.create(self.options),
+      RerunProc.create(self.options),
       execproc,
     ]
     self._prepare_procs(procs)
-    loader.load_initial_tests(initial_batch_size=float('inf'))
+    loader.load_initial_tests()

     # TODO(majeski): maybe some notification from loader would be better?
     if combiner:
-      combiner.generate_initial_tests(options.j * 4)
+      combiner.generate_initial_tests(self.options.j * 4)

     # This starts up worker processes and blocks until all tests are
     # processed.
@@ -190,48 +191,9 @@ class NumFuzzer(base_runner.BaseTestRunner):
     # Indicate if a SIGINT or SIGTERM happened.
     return sigproc.exit_code

-  def _is_testsuite_supported(self, suite, options):
-    return not options.combine_tests or suite.test_combiner_available()
+  def _is_testsuite_supported(self, suite):
+    return not self.options.combine_tests or suite.test_combiner_available()

-  def _create_combiner(self, rng, options):
-    if not options.combine_tests:
-      return None
-    return CombinerProc(rng, options.combine_min, options.combine_max,
-                        options.tests_count)
-
-  def _create_fuzzer(self, rng, options):
-    return fuzzer.FuzzerProc(
-        rng,
-        self._tests_count(options),
-        self._create_fuzzer_configs(options),
-        self._disable_analysis(options),
-    )
-
-  def _tests_count(self, options):
-    if options.combine_tests:
-      return 1
-    return options.tests_count
-
-  def _disable_analysis(self, options):
-    """Disable analysis phase when options are used that don't support it."""
-    return options.combine_tests
-
-  def _create_fuzzer_configs(self, options):
-    fuzzers = []
-    def add(name, prob, *args):
-      if prob:
-        fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
-    add('compaction', options.stress_compaction)
-    add('marking', options.stress_marking)
-    add('scavenge', options.stress_scavenge)
-    add('gc_interval', options.stress_gc)
-    add('stack', options.stress_stack_size)
-    add('threads', options.stress_thread_pool_size)
-    add('delay', options.stress_delay_tasks)
-    add('deopt', options.stress_deopt, options.stress_deopt_min)
-    return fuzzers

 if __name__ == '__main__':
-  sys.exit(NumFuzzer().execute())
+  sys.exit(NumFuzzer().execute())  # pragma: no cover
{
"current_cpu": "x64",
"dcheck_always_on": false,
"is_android": false,
"is_asan": false,
"is_cfi": false,
"is_clang": true,
"is_component_build": false,
"is_debug": false,
"is_full_debug": false,
"is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
"is_tsan": false,
"target_cpu": "x64",
"v8_current_cpu": "x64",
"v8_enable_i18n_support": true,
"v8_enable_verify_predictable": false,
"v8_target_cpu": "x64",
"v8_enable_concurrent_marking": true,
"v8_enable_verify_csa": false,
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
"v8_enable_sandbox": false,
"v8_enable_shared_ro_heap": true,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
"v8_enable_webassembly": true,
"v8_dict_property_const_tracking": false
}
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

[
[ALWAYS, {
  'raspberries': FAIL,
  'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],
  'mangoes': [PASS, SLOW],

  # Both cherries and apples are to test how PASS and FAIL from different
  # sections are merged.
  'cherries': [PASS, SLOW],
  'apples': [FAIL],

  # Unused rule.
  'carrots': [PASS, FAIL],
}],

['variant == nooptimization', {
  'strawberries': [SKIP],
}],

['arch == x64', {
  'cherries': [FAIL],
  'apples': [PASS, SLOW],

  # Unused rule.
  'regress/*': [CRASH],
}],

['asan', {
  'bananas': [PASS, NO_VARIANTS],
  'raspberries': [FAIL, NO_VARIANTS],
}],
]
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Dummy test suite extension with some fruity tests.
"""

from testrunner.local import testsuite
from testrunner.objects import testcase


class TestLoader(testsuite.TestLoader):
  def _list_test_filenames(self):
    return [
        'bananas', 'apples', 'cherries', 'mangoes', 'strawberries',
        'blackberries', 'raspberries',
    ]


class TestSuite(testsuite.TestSuite):
  def _test_loader_class(self):
    return TestLoader

  def _test_class(self):
    return TestCase


class TestCase(testcase.D8TestCase):
  def get_shell(self):
    return 'd8_mocked.py'

  def _get_files_params(self):
    return [self.name]


def GetSuite(*args, **kwargs):
  return TestSuite(*args, **kwargs)
@@ -7,6 +7,13 @@ from collections import defaultdict
 from . import base

 class CombinerProc(base.TestProc):
+
+  @staticmethod
+  def create(options):
+    if not options.combine_tests:
+      return None
+    return CombinerProc(options.fuzzer_rng(), options.combine_min, options.combine_max,
+                        options.tests_count)
+
   def __init__(self, rng, min_group_size, max_group_size, count):
     """
     Args:
@@ -138,6 +138,15 @@ class Fuzzer(object):
 # TODO(majeski): Allow multiple subtests to run at once.
 class FuzzerProc(base.TestProcProducer):
+
+  @staticmethod
+  def create(options):
+    return FuzzerProc(
+        options.fuzzer_rng(),
+        options.fuzzer_tests_count(),
+        options.fuzzer_configs(),
+        options.combine_tests,
+    )
+
   def __init__(self, rng, count, fuzzers, disable_analysis=False):
     """
     Args:
@@ -4,24 +4,25 @@
 from . import base

 class LoadProc(base.TestProc):
   """First processor in the chain that passes all tests to the next processor.
   """

-  def __init__(self, tests):
+  def __init__(self, tests, initial_batch_size=float('inf')):
     super(LoadProc, self).__init__()

     self.tests = tests
+    self.initial_batch_size = initial_batch_size

-  def load_initial_tests(self, initial_batch_size):
+  def load_initial_tests(self):
     """
     Args:
       exec_proc: execution processor that the tests are being loaded into
       initial_batch_size: initial number of tests to load
     """
     loaded_tests = 0
-    while loaded_tests < initial_batch_size:
+    while loaded_tests < self.initial_batch_size:
       try:
         t = next(self.tests)
       except StopIteration:
@@ -24,6 +24,10 @@ def print_failure_header(test, is_flaky=False):
 class ResultsTracker(base.TestProcObserver):
+
+  @staticmethod
+  def create(options):
+    return ResultsTracker(options.exit_after_n_failures)
+
   """Tracks number of results and stops to run tests if max_failures reached."""
   def __init__(self, max_failures):
     super(ResultsTracker, self).__init__()
@@ -58,6 +62,10 @@ class ProgressIndicator(base.TestProcObserver):
   def configure(self, options):
     self.options = options

+  def set_test_count(self, test_count):
+    self._total = test_count
+
 class SimpleProgressIndicator(ProgressIndicator):

   def __init__(self):
@@ -246,9 +254,6 @@ class CompactProgressIndicator(ProgressIndicator):
     self._passed = 0
     self._failed = 0

-  def set_test_count(self, test_count):
-    self._total = test_count
-
   def _on_result_for(self, test, result):
     # TODO(majeski): Support for dummy/grouped results
     if result.has_unexpected_output:
@@ -9,6 +9,13 @@ from .result import RerunResult
 class RerunProc(base.TestProcProducer):
+
+  @staticmethod
+  def create(options):
+    if not options.rerun_failures_count:
+      return None
+    return RerunProc(options.rerun_failures_count,
+                     options.rerun_failures_max)
+
   def __init__(self, rerun_max, rerun_max_total=None):
     super(RerunProc, self).__init__('Rerun')
     self._requirement = base.DROP_OUTPUT
@@ -64,7 +64,7 @@ class TestSequenceProc(unittest.TestCase):
   def _test(self, tests, batch_size, max_heavy):
     # Set up a simple processing pipeline:
     # Loader -> observe results -> sequencer -> execution.
-    loader = LoadProc(iter(tests))
+    loader = LoadProc(iter(tests), initial_batch_size=batch_size)
     results = FakeResultObserver()
     sequence_proc = SequenceProc(max_heavy)
     execution = FakeExecutionProc()
@@ -74,7 +74,7 @@ class TestSequenceProc(unittest.TestCase):
     # Fill the execution queue (with the number of tests potentially
     # executed in parallel).
-    loader.load_initial_tests(batch_size)
+    loader.load_initial_tests()

     # Simulate the execution test by test.
     while execution.tests:
@@ -18,6 +18,13 @@ def radix_hash(capacity, key):
 class ShardProc(base.TestProcFilter):
+
+  @staticmethod
+  def create(options):
+    myid, count = options.shard_info()
+    if count == 1:
+      return None
+    return ShardProc(myid, count)
+
   """Processor distributing tests between shards.
   It hashes the unique test identifiers uses the hash to shard tests.
   """
@@ -8,6 +8,12 @@ from . import base
 class TimeoutProc(base.TestProcObserver):
+
+  @staticmethod
+  def create(options):
+    if not options.total_timeout_sec:
+      return None
+    return TimeoutProc(options.total_timeout_sec)
+
   def __init__(self, duration_sec):
     super(TimeoutProc, self).__init__()
     self._duration_sec = duration_sec
# Copyright 2022 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import random
from testrunner.testproc import fuzzer
class AugmentedOptions(optparse.Values):
  """This class will augment an existing options object with
  a couple of convenient methods and properties.
  """

  @staticmethod
  def augment(options_object):
    options_object.__class__ = AugmentedOptions
    return options_object

  def fuzzer_rng(self):
    if not getattr(self, '_fuzzer_rng', None):
      self._fuzzer_rng = random.Random(self.fuzzer_random_seed)
    return self._fuzzer_rng
  def shard_info(self):
    """
    Returns pair:
        (id of the current shard [1; number of shards], number of shards)
    """
    # Read gtest shard configuration from environment (e.g. set by swarming).
    # If none is present, use values passed on the command line.
    count = int(
        os.environ.get('GTEST_TOTAL_SHARDS', self.shard_count))
    run = os.environ.get('GTEST_SHARD_INDEX')
    # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
    run = int(run) + 1 if run else self.shard_run

    if self.shard_count > 1:
      # Log if a value was passed on the cmd line and it differs from the
      # environment variables.
      if self.shard_count != count:  # pragma: no cover
        print("shard_count from cmd line differs from environment variable "
              "GTEST_TOTAL_SHARDS")
      if (self.shard_run > 1 and
          self.shard_run != run):  # pragma: no cover
        print("shard_run from cmd line differs from environment variable "
              "GTEST_SHARD_INDEX")

    if run < 1 or run > count:
      # TODO(machenbach): Turn this into an assert. If that's wrong on the
      # bots, printing will be quite useless. Or refactor this code to make
      # sure we get a return code != 0 after testing if we got here.
      print("shard-run not a valid number, should be in [1:shard-count]")
      print("defaulting back to running all tests")
      return 0, 1

    return run - 1, count  # coming back to 0 based counting

  def fuzzer_configs(self):
    fuzzers = []

    def add(name, prob, *args):
      if prob:
        fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))

    add('compaction', self.stress_compaction)
    add('marking', self.stress_marking)
    add('scavenge', self.stress_scavenge)
    add('gc_interval', self.stress_gc)
    add('stack', self.stress_stack_size)
    add('threads', self.stress_thread_pool_size)
    add('delay', self.stress_delay_tasks)
    add('deopt', self.stress_deopt, self.stress_deopt_min)
    return fuzzers

  def fuzzer_tests_count(self):
    if self.combine_tests:
      return 1
    return self.tests_count
# Copyright 2022 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import os
import shutil
import sys
import tempfile
import unittest
from dataclasses import dataclass
from io import StringIO
from os.path import dirname as up
TOOLS_ROOT = up(up(up(os.path.abspath(__file__))))
sys.path.append(TOOLS_ROOT)
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'testrunner', 'testdata')
from testrunner.local import command
from testrunner.local import pool
@contextlib.contextmanager
def temp_dir():
  """Wrapper making a temporary directory available."""
  path = None
  try:
    path = tempfile.mkdtemp('v8_test_')
    yield path
  finally:
    if path:
      shutil.rmtree(path)


@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
  """Wrapper that sets up a temporary V8 test root.

  Args:
    baseroot: The folder with the test root blueprint. All files will be
        copied to the temporary test root, to guarantee a fresh setup with no
        dirty state.
  """
  basedir = os.path.join(TEST_DATA_ROOT, baseroot)
  with temp_dir() as tempbase:
    if os.path.exists(basedir):
      shutil.copytree(basedir, tempbase, dirs_exist_ok=True)
    yield tempbase
@contextlib.contextmanager
def capture():
  """Wrapper that replaces system stdout/stderr and provides the streams."""
  oldout = sys.stdout
  olderr = sys.stderr
  try:
    stdout = StringIO()
    stderr = StringIO()
    sys.stdout = stdout
    sys.stderr = stderr
    yield stdout, stderr
  finally:
    sys.stdout = oldout
    sys.stderr = olderr
def with_json_output(basedir):
  """Function used as a placeholder where we need to resolve the value in the
  context of a temporary test configuration."""
  return os.path.join(basedir, 'out.json')


def clean_json_output(json_path, basedir):
  # Extract relevant properties of the json output.
  if not json_path:
    return None
  with open(json_path) as f:
    json_output = json.load(f)

  # Replace duration in actual output as it's non-deterministic. Also
  # replace the python executable prefix as it has a different absolute
  # path dependent on where this runs.
  def replace_variable_data(data):
    data['duration'] = 1
    data['command'] = ' '.join(
        ['/usr/bin/python'] + data['command'].split()[1:])
    data['command'] = data['command'].replace(basedir + '/', '')

  for data in json_output['slowest_tests']:
    replace_variable_data(data)
  for data in json_output['results']:
    replace_variable_data(data)
  json_output['duration_mean'] = 1

  # We need lexicographic sorting here to avoid non-deterministic behaviour.
  # The original sorting key is duration, but in our fake test we have
  # non-deterministic durations before we reset them to 1.
  def sort_key(x):
    return str(sorted(x.items()))

  json_output['slowest_tests'].sort(key=sort_key)
  return json_output


def override_build_config(basedir, **kwargs):
  """Override the build config with new values provided as kwargs."""
  if not kwargs:
    return
  path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json')
  with open(path) as f:
    config = json.load(f)
    config.update(kwargs)
  with open(path, 'w') as f:
    json.dump(config, f)


@dataclass
class TestResult():
  stdout: str
  stderr: str
  returncode: int
  json: str
  current_test_case: unittest.TestCase

  def __str__(self):
    return f'\nReturncode: {self.returncode}\nStdout:\n{self.stdout}\nStderr:\n{self.stderr}\n'

  def has_returncode(self, code):
    self.current_test_case.assertEqual(code, self.returncode, self)

  def stdout_includes(self, content):
    self.current_test_case.assertIn(content, self.stdout, self)

  def stdout_excludes(self, content):
    self.current_test_case.assertNotIn(content, self.stdout, self)

  def stderr_includes(self, content):
    self.current_test_case.assertIn(content, self.stderr, self)

  def stderr_excludes(self, content):
    self.current_test_case.assertNotIn(content, self.stderr, self)

  def json_content_equals(self, expected_results_file):
    with open(os.path.join(TEST_DATA_ROOT, expected_results_file)) as f:
      expected_test_results = json.load(f)
    pretty_json = json.dumps(self.json, indent=2, sort_keys=True)
    msg = None  # Set to pretty_json for bootstrapping.
    self.current_test_case.assertDictEqual(self.json, expected_test_results, msg)
class TestRunnerTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    command.setup_testing()
    pool.setup_testing()

  def run_tests(self, *args, baseroot='testroot1', config_overrides={}, **kwargs):
    """Executes the test runner with captured output."""
    with temp_base(baseroot=baseroot) as basedir:
      override_build_config(basedir, **config_overrides)
      json_out_path = None

      def resolve_arg(arg):
        """Some arguments come as function objects to be called (resolved)
        in the context of a temporary test configuration."""
        nonlocal json_out_path
        if arg == with_json_output:
          json_out_path = with_json_output(basedir)
          return json_out_path
        return arg

      resolved_args = [resolve_arg(arg) for arg in args]
      with capture() as (stdout, stderr):
        sys_args = ['--command-prefix', sys.executable] + resolved_args
        if kwargs.get('infra_staging', False):
          sys_args.append('--infra-staging')
        else:
          sys_args.append('--no-infra-staging')
        code = self.get_runner_class()(basedir=basedir).execute(sys_args)
        json_out = clean_json_output(json_out_path, basedir)
      return TestResult(stdout.getvalue(), stderr.getvalue(), code, json_out, self)

  def get_runner_class(self):
    """Implement to return the runner class."""
    return None