Commit 5ede3cb5 authored by Michal Majewski, committed by Commit Bot

Reland "[test] Random seed processor"

This is a reland of 0db74d49.

Original change's description:
> [test] Random seed processor
> 
> 1. --total-timeout-sec is now available for ./run-tests.py. It can be
> useful with infinite seed stressing.
> 2. The random seed is dropped from the context. The JSON progress
> indicator now gets it from the list of command args.
> 
> Bug: v8:6917
> Cq-Include-Trybots: luci.v8.try:v8_linux_noi18n_rel_ng
> Change-Id: I73e535bc8face9b913c696b8d5e3a246fa231004
> Reviewed-on: https://chromium-review.googlesource.com/888524
> Commit-Queue: Michał Majewski <majeski@google.com>
> Reviewed-by: Michael Achenbach <machenbach@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#50964}

Bug: v8:6917
Change-Id: I1ea376a4abffce5ab65f4834ea7e6d6011765ffa
Cq-Include-Trybots: luci.v8.try:v8_linux_noi18n_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/894204
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michał Majewski <majeski@google.com>
Cr-Commit-Position: refs/heads/master@{#50978}
parent 768c41c8
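The headline flag can be combined with infinite seed stressing; a hypothetical invocation (suite name and timeout chosen purely for illustration, flag names taken from the diff below):

./run-tests.py --random-seed-stress-count=0 --total-timeout-sec=3600 mjsunit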
@@ -35,9 +35,9 @@ from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(TestSuite, self).__init__(name, root)
self.testroot = os.path.join(root, "data")
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(self.root, "data")
def ListTests(self, context):
tests = map(self._create_test, [
@@ -143,5 +143,5 @@ class SuppressedTimeoutTestCase(TestCase):
self.expected_outcomes = self.expected_outcomes + [statusfile.TIMEOUT]
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -67,5 +67,5 @@ class TestCase(testcase.TestCase):
return [self.path]
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -78,5 +78,5 @@ class TestCase(testcase.TestCase):
return os.path.join(self.suite.root, self.path + self._get_suffix())
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -60,5 +60,5 @@ class TestCase(testcase.TestCase):
return 'v8_simple_%s_fuzzer' % group
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -65,5 +65,5 @@ class TestCase(testcase.TestCase):
os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX)
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -82,5 +82,5 @@ class TestCase(testcase.TestCase):
return os.path.join(self.suite.root, self.path + self._get_suffix())
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -103,5 +103,5 @@ class TestCase(testcase.TestCase):
self._expected_fail())
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -179,7 +179,8 @@ class CombinedTest(testcase.TestCase):
passed as arguments.
"""
def __init__(self, name, tests):
super(CombinedTest, self).__init__(tests[0].suite, '', name)
super(CombinedTest, self).__init__(tests[0].suite, '', name,
tests[0]._test_config)
self._tests = tests
def _prepare_outcomes(self, force_update=True):
@@ -195,8 +196,6 @@ class CombinedTest(testcase.TestCase):
"""
shell = 'd8'
shell_flags = ['--test', '--disable-abortjs', '--quiet-load']
if ctx.random_seed:
shell_flags.append('--random-seed=%s' % ctx.random_seed)
return shell, shell_flags
def _get_cmd_params(self, ctx):
@@ -238,5 +237,5 @@ class SuppressedTestCase(TestCase):
)
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -44,5 +44,5 @@ class TestCase(testcase.TestCase):
return mkgrokdump.OutProc(self.expected_outcomes, self.suite.expected_path)
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -56,9 +56,9 @@ TEST_DIRS = """
class TestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(TestSuite, self).__init__(name, root)
self.testroot = os.path.join(root, "data")
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(self.root, "data")
def ListTests(self, context):
tests = []
@@ -118,5 +118,5 @@ class TestCase(testcase.TestCase):
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -85,8 +85,8 @@ class TestSuite(testsuite.TestSuite):
class TestCase(testcase.TestCase):
def __init__(self, suite, path, name, source, template_flags):
super(TestCase, self).__init__(suite, path, name)
def __init__(self, suite, path, name, test_config, source, template_flags):
super(TestCase, self).__init__(suite, path, name, test_config)
self._source = source
self._template_flags = template_flags
@@ -113,5 +113,5 @@ class TestCase(testcase.TestCase):
return self._source
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -116,8 +116,8 @@ class TestSuite(testsuite.TestSuite):
# Match the (...) in '/path/to/v8/test/test262/subdir/test/(...).js'
# In practice, subdir is data or local-tests
def __init__(self, name, root):
super(TestSuite, self).__init__(name, root)
def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
self.harness = [os.path.join(self.harnesspath, f)
@@ -250,5 +250,5 @@ class TestCase(testcase.TestCase):
return test262.NoExceptionOutProc(self.expected_outcomes)
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -66,7 +66,7 @@ class TestCase(testcase.TestCase):
def _get_suite_flags(self, ctx):
return (
["--gtest_filter=" + self.path] +
["--gtest_random_seed=%s" % ctx.random_seed] +
["--gtest_random_seed=%s" % self.random_seed] +
["--gtest_print_time=0"]
)
@@ -74,5 +74,5 @@ class TestCase(testcase.TestCase):
return self.suite.name
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -31,5 +31,5 @@ class TestCase(testcase.TestCase):
return [os.path.join(self.suite.root, self.path + self._get_suffix())]
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -109,5 +109,5 @@ class TestCase(testcase.TestCase):
os.path.join(self.suite.root, self.path) + '-expected.txt')
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -17,10 +17,11 @@ sys.path.insert(
os.path.dirname(os.path.abspath(__file__))))
from local import testsuite
from local import utils
from testproc.shard import ShardProc
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.timeout import TimeoutProc
BASE_DIR = (
@@ -215,7 +216,7 @@ class BaseTestRunner(object):
raise
args = self._parse_test_args(args)
suites = self._get_suites(args, options.verbose)
suites = self._get_suites(args, options)
self._setup_env()
return self._do_execute(suites, args, options)
@@ -255,6 +256,8 @@ class BaseTestRunner(object):
parser.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
parser.add_option("--total-timeout-sec", default=0, type="int",
help="How long should fuzzer run")
# TODO(machenbach): Temporary options for rolling out new test runner
# features.
@@ -480,9 +483,9 @@ class BaseTestRunner(object):
return reduce(list.__add__, map(expand_test_group, args), [])
def _get_suites(self, args, verbose=False):
def _get_suites(self, args, options):
names = self._args_to_suite_names(args)
return self._load_suites(names, verbose)
return self._load_suites(names, options)
def _args_to_suite_names(self, args):
# Use default tests if no test configuration was provided at the cmd line.
@@ -496,14 +499,19 @@ class BaseTestRunner(object):
def _expand_test_group(self, name):
return TEST_MAP.get(name, [name])
def _load_suites(self, names, verbose=False):
def _load_suites(self, names, options):
test_config = self._create_test_config(options)
def load_suite(name):
if verbose:
if options.verbose:
print '>>> Loading test suite: %s' % name
return testsuite.TestSuite.LoadTestSuite(
os.path.join(self.basedir, 'test', name))
os.path.join(self.basedir, 'test', name),
test_config)
return map(load_suite, names)
def _create_test_config(self, options):
return TestConfig(options.random_seed)
# TODO(majeski): remove options & args parameters
def _do_execute(self, suites, args, options):
raise NotImplementedError()
@@ -550,3 +558,8 @@ class BaseTestRunner(object):
return 1, 1
return shard_run, shard_count
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
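TimeoutProc's body is not shown on this page; only its construction is. The idea behind --total-timeout-sec is a wall-clock budget checked between tests. A minimal self-contained sketch of that idea (not the real class, whose interface this diff does not show):

import time

class TotalTimeBudget(object):
  """Illustrative wall-clock budget for a whole test run."""

  def __init__(self, duration_sec):
    self._deadline = time.time() + duration_sec

  def expired(self):
    # Checked between tests; once the budget is spent, no new tests
    # are scheduled (tests already running are left to finish).
    return time.time() >= self._deadline

budget = TotalTimeBudget(3600)
# while tests_remaining and not budget.expired(): run_next_test()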
@@ -321,18 +321,17 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results, arch, mode, random_seed):
def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.random_seed = random_seed
self.results = []
self.tests = []
def ToProgressIndicatorProc(self):
return progress_proc.JsonTestProgressIndicator(
self.json_test_results, self.arch, self.mode, self.random_seed)
self.json_test_results, self.arch, self.mode)
def Done(self):
complete_results = []
@@ -380,6 +379,16 @@ class JsonTestProgressIndicator(ProgressIndicator):
# will have unexpected_output reported here as well.
return
random_seed = None
for i, flag in enumerate(reversed(test.cmd.args)):
if 'random-seed' in flag:
if '=' in flag:
random_seed = flag.split('=')[1]
break
elif i > 0:
random_seed = test.cmd.args[i - 1]
break
self.results.append({
"name": str(test),
"flags": test.cmd.args,
@@ -391,10 +400,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes,
"duration": output.duration,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
"random_seed": int(random_seed),
"target_name": test.get_shell(),
"variant": test.variant,
})
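The recovery loop above walks the command line from the end, so a per-test seed override (appended after the defaults) wins over an inherited one. A simplified standalone equivalent, covering both the --random-seed=N and --random-seed N spellings (a hypothetical helper, not part of the commit):

def extract_random_seed(args):
  # Scan backwards so the last occurrence takes precedence.
  for i in range(len(args) - 1, -1, -1):
    flag = args[i]
    if 'random-seed' not in flag:
      continue
    if '=' in flag:            # --random-seed=123
      return int(flag.split('=')[1])
    if i + 1 < len(args):      # --random-seed 123
      return int(args[i + 1])
  return None

assert extract_random_seed(['foo.js', '--random-seed=42']) == 42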
@@ -100,21 +100,21 @@ class TestCombiner(object):
class TestSuite(object):
@staticmethod
def LoadTestSuite(root):
def LoadTestSuite(root, test_config):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module(name + "_testcfg", f, pathname, description)
return module.GetSuite(name, root)
return module.GetSuite(name, root, test_config)
finally:
if f:
f.close()
def __init__(self, name, root):
# Note: This might be called concurrently from different processes.
def __init__(self, name, root, test_config):
self.name = name # string
self.root = root # string containing path
self.test_config = test_config
self.tests = None # list of TestCase objects
self.statusfile = None
self.suppress_internals = False
@@ -242,8 +242,8 @@ class TestSuite(object):
test_class = self._suppressed_test_class()
else:
test_class = self._test_class()
test = test_class(self, path, self._path_to_name(path), **kwargs)
return test
return test_class(self, path, self._path_to_name(path), self.test_config,
**kwargs)
def _suppressed_test_class(self):
"""Optional testcase that suppresses assertions. Used by fuzzers that are
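With the new LoadTestSuite signature, the runner builds one TestConfig per invocation and threads it through the suite into every TestCase. A hypothetical call site (the path is invented for illustration):

from testrunner.local import testsuite
from testrunner.test_config import TestConfig

config = TestConfig(random_seed=0)   # 0/None means: generate a fresh seed
suite = testsuite.TestSuite.LoadTestSuite(
    '/path/to/v8/test/mjsunit', config)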
@@ -25,7 +25,7 @@ from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import ResultsTracker, TestsCounter
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.timeout import TimeoutProc
from testrunner.utils import random_utils
DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
@@ -61,7 +61,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
default="mono")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int")
parser.add_option("--random-seed", default=0,
parser.add_option("--random-seed", default=0, type=int,
help="Default seed for initializing random generator")
parser.add_option("--fuzzer-random-seed", default=0,
help="Default seed for initializing fuzzer random "
@@ -82,9 +82,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
"value 0 to provide infinite number of subtests. "
"When --combine-tests is set it indicates how many "
"tests to create in total")
parser.add_option("--total-timeout-sec", default=0, type="int",
help="How long should fuzzer run. It overrides "
"--tests-count")
# Stress gc
parser.add_option("--stress-marking", default=0, type="int",
@@ -130,12 +127,8 @@ class NumFuzzer(base_runner.BaseTestRunner):
options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0:
options.j = multiprocessing.cpu_count()
while options.random_seed == 0:
options.random_seed = random.SystemRandom().randint(-2147483648,
2147483647)
while options.fuzzer_random_seed == 0:
options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
2147483647)
if not options.fuzzer_random_seed:
options.fuzzer_random_seed = random_utils.random_seed()
if options.total_timeout_sec:
options.tests_count = 0
@@ -165,8 +158,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode,
ctx.random_seed))
self.mode_options.execution_mode))
loader = LoadProc()
fuzzer_rng = random.Random(options.fuzzer_random_seed)
@@ -234,7 +226,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
options.command_prefix,
options.extra_flags,
False, # Keep i18n on by default.
options.random_seed,
True, # No sorting of test cases.
options.rerun_failures_count,
options.rerun_failures_max,
@@ -336,11 +327,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
add('deopt', options.stress_deopt, options.stress_deopt_min)
return fuzzers
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
def _create_rerun_proc(self, options):
if not options.rerun_failures_count:
return None
@@ -28,24 +28,27 @@
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
no_sorting, rerun_failures_count, rerun_failures_max, no_harness,
use_perf_data, sancov_dir, infra_staging=False):
isolates, command_prefix, extra_flags, noi18n, no_sorting,
rerun_failures_count, rerun_failures_max, no_harness,
use_perf_data, sancov_dir):
# Used by perfdata
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
self.mode_flags = mode_flags
self.verbose = verbose
self.timeout = timeout
self.isolates = isolates
self.no_sorting = no_sorting
self.use_perf_data = use_perf_data
# Used by testcase to create command
self.command_prefix = command_prefix
self.extra_flags = extra_flags
self.isolates = isolates
self.mode_flags = mode_flags
self.no_harness = no_harness
self.noi18n = noi18n
self.random_seed = random_seed
self.no_sorting = no_sorting
self.shell_dir = shell_dir
self.timeout = timeout
self.verbose = verbose
# Will be deprecated after moving to test processors
self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max
self.no_harness = no_harness
self.use_perf_data = use_perf_data
self.sancov_dir = sancov_dir
self.infra_staging = infra_staging
@@ -40,7 +40,7 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
class TestCase(object):
def __init__(self, suite, path, name):
def __init__(self, suite, path, name, test_config):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
@@ -59,18 +59,26 @@ class TestCase(object):
self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
self.keep_output = False # Can output of this test be dropped
# Test config contains information needed to build the command.
# TODO(majeski): right now it contains only random seed.
self._test_config = test_config
# Overrides default random seed from test_config if specified.
self._random_seed = None
self._statusfile_outcomes = None
self._expected_outcomes = None # optimization: None == [statusfile.PASS]
self.expected_outcomes = None
self._statusfile_flags = None
self._prepare_outcomes()
def create_subtest(self, processor, subtest_id, variant=None, flags=None,
keep_output=False):
keep_output=False, random_seed=None):
subtest = copy.copy(self)
subtest.origin = self
subtest.processor = processor
subtest.procid += '.%s' % subtest_id
subtest.keep_output |= keep_output
if random_seed:
subtest._random_seed = random_seed
if flags:
subtest.variant_flags = subtest.variant_flags + flags
if variant is not None:
@@ -79,7 +87,8 @@ class TestCase(object):
subtest._prepare_outcomes()
return subtest
def create_variant(self, variant, flags, procid_suffix=None):
def create_variant(self, variant, flags, procid_suffix=None,
random_seed=None):
"""Makes a shallow copy of the object and updates variant, variant flags and
all fields that depend on it, e.g. expected outcomes.
@@ -88,6 +97,8 @@ class TestCase(object):
flags - flags that should be added to origin test's variant flags
procid_suffix - for multiple variants with the same name set suffix to
keep procid unique.
random_seed - random seed to use in this variant. None means use base
test's random seed.
"""
other = copy.copy(self)
if not self.variant_flags:
@@ -100,6 +111,9 @@ class TestCase(object):
else:
other.procid += '[%s]' % variant
if random_seed:
other._random_seed = random_seed
other._prepare_outcomes(variant != self.variant)
return other
@@ -171,6 +185,7 @@ class TestCase(object):
def _get_cmd_params(self, ctx):
"""Gets command parameters and combines them in the following order:
- files [empty by default]
- random seed
- extra flags (from command line)
- user flags (variant/fuzzer flags)
- statusfile flags
@@ -182,6 +197,7 @@ class TestCase(object):
"""
return (
self._get_files_params(ctx) +
self._get_random_seed_flags() +
self._get_extra_flags(ctx) +
self._get_variant_flags() +
self._get_statusfile_flags() +
@@ -196,6 +212,13 @@ class TestCase(object):
def _get_files_params(self, ctx):
return []
def _get_random_seed_flags(self):
return ['--random-seed=%d' % self.random_seed]
@property
def random_seed(self):
return self._random_seed or self._test_config.random_seed
def _get_extra_flags(self, ctx):
return ctx.extra_flags
@@ -225,8 +248,6 @@ class TestCase(object):
shell_flags.append('--test')
if utils.IsWindows():
shell += '.exe'
if ctx.random_seed:
shell_flags.append('--random-seed=%s' % ctx.random_seed)
return shell, shell_flags
def _get_timeout(self, params, timeout):
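The seed a test actually runs with is thus a two-level fallback: a per-subtest or per-variant override, if one was set, shadows the run-wide default carried by the test config. Distilled to its core (illustrative only, names invented):

class SeededThing(object):
  def __init__(self, default_seed):
    self._default_seed = default_seed  # stands in for test_config.random_seed
    self._random_seed = None           # per-subtest override

  @property
  def random_seed(self):
    return self._random_seed or self._default_seed

t = SeededThing(42)
assert t.random_seed == 42   # falls back to the run-wide default
t._random_seed = 7
assert t.random_seed == 7    # the override shadows it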
@@ -34,7 +34,9 @@ from testrunner.testproc.progress import (VerboseProgressIndicator,
ResultsTracker,
TestsCounter)
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.seed import SeedProc
from testrunner.testproc.variant import VariantProc
from testrunner.utils import random_utils
TIMEOUT_DEFAULT = 60
@@ -220,7 +222,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
type=int)
parser.add_option("--random-seed-stress-count", default=1, type="int",
dest="random_seed_stress_count",
help="Number of runs with different random seeds")
help="Number of runs with different random seeds. Only "
"with test processors: 0 means infinite "
"generation.")
def _use_staging(self, options):
if options.infra_staging is not None:
@@ -302,9 +306,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if options.j == 0:
options.j = multiprocessing.cpu_count()
if options.random_seed_stress_count <= 1 and options.random_seed == 0:
options.random_seed = self._random_seed()
if options.variants == "infra_staging":
options.variants = "exhaustive"
options.infra_staging = True
@@ -356,12 +357,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
"allow_user_segv_handler=1",
])
def _random_seed(self):
seed = 0
while not seed:
seed = random.SystemRandom().randint(-2147483648, 2147483647)
return seed
def _execute(self, args, options, suites):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
@@ -387,14 +382,12 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.command_prefix,
options.extra_flags,
self.build_config.no_i18n,
options.random_seed,
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
options.no_harness,
use_perf_data=not options.swarming,
sancov_dir=self.sancov_dir,
infra_staging=options.infra_staging)
sancov_dir=self.sancov_dir)
# TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from
@@ -441,8 +434,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode,
ctx.random_seed))
self.mode_options.execution_mode))
if options.flakiness_results: # pragma: no cover
progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results))
@@ -480,23 +472,20 @@ class StandardTestRunner(base_runner.BaseTestRunner):
for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ]
if options.random_seed_stress_count > 1:
# Duplicate test for random seed stress mode.
def iter_seed_flags():
for _ in range(0, options.random_seed_stress_count):
# Use given random seed for all runs (set by default in
# execution.py) or a new random seed if none is specified.
if options.random_seed:
yield []
else:
yield ["--random-seed=%d" % self._random_seed()]
s.tests = [
t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
for t in variant_tests
for n, flags in enumerate(iter_seed_flags())
]
else:
s.tests = variant_tests
# Duplicate test for random seed stress mode.
def iter_seed_flags():
for _ in range(0, options.random_seed_stress_count or 1):
# Use given random seed for all runs (set by default in
# execution.py) or a new random seed if none is specified.
if options.random_seed:
yield options.random_seed
else:
yield random_utils.random_seed()
s.tests = [
t.create_variant(t.variant, [], 'seed-%d' % n, random_seed=val)
for t in variant_tests
for n, val in enumerate(iter_seed_flags())
]
# Second filtering by status applying also the variant-dependent rules.
if options.warn_unused:
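The generator in the hunk above yields one seed per stress run: the user-supplied seed if given, otherwise a fresh one per iteration; the "or 1" fallback keeps this legacy path producing a single run when the count is 0, since infinite generation is handled by SeedProc in the processor pipeline instead. The same logic in isolation (names are illustrative):

def iter_seeds(stress_count, fixed_seed, fresh_seed):
  # fresh_seed stands in for random_utils.random_seed.
  for _ in range(stress_count or 1):
    yield fixed_seed if fixed_seed else fresh_seed()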
@@ -592,8 +581,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
tests_counter,
VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_seed_proc(options),
] + indicators + [
results,
self._create_timeout_proc(options),
self._create_rerun_proc(context),
execproc,
]
@@ -633,6 +624,11 @@ class StandardTestRunner(base_runner.BaseTestRunner):
exit_code = 0
return exit_code
def _create_seed_proc(self, options):
if options.random_seed_stress_count == 1 and options.random_seed:
return None
return SeedProc(options.random_seed_stress_count, options.random_seed)
def _create_rerun_proc(self, ctx):
if not ctx.rerun_failures_count:
return None
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
# TODO(majeski): Move the rest of stuff from context
class TestConfig(object):
def __init__(self, random_seed):
# random_seed is never None.
self.random_seed = random_seed or self._gen_random_seed()
def _gen_random_seed(self):
seed = None
while not seed:
seed = random.SystemRandom().randint(-2147483648, 2147483647)
return seed
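The contract here, grounded in the code above (illustrative check): passing a falsy seed makes TestConfig draw a nonzero signed 32-bit seed from the system RNG, while an explicit seed is kept as-is.

from testrunner.test_config import TestConfig

cfg = TestConfig(random_seed=0)
assert cfg.random_seed != 0
assert -2**31 <= cfg.random_seed <= 2**31 - 1

assert TestConfig(random_seed=123).random_seed == 123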
@@ -292,7 +292,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results, arch, mode, random_seed):
def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
# We want to drop stdout/err for all passed tests on the first try, but we
# need to get outputs for all runs after the first one. To accommodate that,
@@ -303,7 +303,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.random_seed = random_seed
self.results = []
self.tests = []
@@ -338,10 +337,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes,
"duration": output.duration,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
"random_seed": test.random_seed,
"target_name": test.get_shell(),
"variant": test.variant,
})
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
from collections import defaultdict
from . import base
from ..utils import random_utils
class SeedProc(base.TestProcProducer):
def __init__(self, count, seed=None):
"""
Args:
count: How many subtests with different seeds to create for each test.
0 means infinite.
seed: seed to use. None means random seed for each subtest.
"""
super(SeedProc, self).__init__('Seed')
self._count = count
self._seed = seed
self._last_idx = defaultdict(int)
def setup(self, requirement=base.DROP_RESULT):
super(SeedProc, self).setup(requirement)
# SeedProc is optimized for dropping the result
assert requirement == base.DROP_RESULT
def _next_test(self, test):
self._try_send_next_test(test)
def _result_for(self, test, subtest, result):
self._try_send_next_test(test)
def _try_send_next_test(self, test):
def create_subtest(idx):
seed = self._seed or random_utils.random_seed()
return self._create_subtest(test, idx, random_seed=seed)
num = self._last_idx[test.procid]
if not self._count or num < self._count:
num += 1
self._send_test(create_subtest(num))
self._last_idx[test.procid] = num
else:
del self._last_idx[test.procid]
self._send_result(test, None)
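SeedProc is a producer: for each incoming test it keeps emitting subtests, each stamped with either the fixed seed or a freshly drawn one, until the configured count is exhausted, and only then reports a (dropped) result upstream. Construction mirrors _create_seed_proc in the standard runner (usage sketch):

from testrunner.testproc.seed import SeedProc

# Infinite stressing, fresh seed per subtest (bounded by --total-timeout-sec):
infinite = SeedProc(count=0, seed=None)

# Exactly five runs, all with the same fixed seed:
fixed = SeedProc(count=5, seed=123)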
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
def random_seed():
"""Returns random, non-zero seed."""
seed = 0
while not seed:
seed = random.SystemRandom().randint(-2147483648, 2147483647)
return seed
@@ -305,6 +305,7 @@ class SystemTest(unittest.TestCase):
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant with the command. Needs investigation.
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results1.json', json_path)
def testFlakeWithRerunAndJSONProc(self):
@@ -336,6 +337,7 @@ class SystemTest(unittest.TestCase):
'Done running sweet/bananaflakes: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results2.json', json_path)
def testAutoDetect(self):
@@ -549,7 +551,10 @@ class SystemTest(unittest.TestCase):
# timeout was used.
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithDefault(self):
def testRandomSeedStressWithDefaultProc(self):
self.testRandomSeedStressWithDefault(infra_staging=True)
def testRandomSeedStressWithDefault(self, infra_staging=False):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
@@ -559,8 +564,13 @@ class SystemTest(unittest.TestCase):
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
infra_staging=infra_staging,
)
self.assertIn('Running 2 tests', result.stdout, result)
if infra_staging:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
else:
self.assertIn('Running 2 tests', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
"--random-seed=123",
"strawberries",
"--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
"stdout": "--random-seed=123 strawberries --nohard-abort\n",
"stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
"--random-seed=123",
"strawberries",
"--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -41,20 +41,20 @@
"result": "FAIL",
"run": 2,
"stderr": "",
"stdout": "--random-seed=123 strawberries --nohard-abort\n",
"stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
"--random-seed=123",
"strawberries",
"--random-seed=123",
"--nohard-abort"
],
"name": "sweet/strawberries",
@@ -62,40 +62,40 @@
"result": "FAIL",
"run": 3,
"stderr": "",
"stdout": "--random-seed=123 strawberries --nohard-abort\n",
"stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
"--random-seed=123",
"strawberries",
"--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
"--random-seed=123",
"strawberries",
"--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
"--random-seed=123",
"strawberries",
"--random-seed=123",
"--nohard-abort"
],
"marked_slow": true,
@@ -4,15 +4,15 @@
"mode": "release",
"results": [
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 1,
"expected": [
"PASS"
],
"flags": [
"--random-seed=123",
"bananaflakes",
"--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -20,20 +20,20 @@
"result": "FAIL",
"run": 1,
"stderr": "",
"stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
"stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"exit_code": 0,
"expected": [
"PASS"
],
"flags": [
"--random-seed=123",
"bananaflakes",
"--random-seed=123",
"--nohard-abort"
],
"name": "sweet/bananaflakes",
@@ -41,29 +41,29 @@
"result": "PASS",
"run": 2,
"stderr": "",
"stdout": "--random-seed=123 bananaflakes --nohard-abort\n",
"stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py",
"variant": "default"
}
],
"slowest_tests": [
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
"--random-seed=123",
"bananaflakes",
"--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
"name": "sweet/bananaflakes"
},
{
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort",
"command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1,
"flags": [
"--random-seed=123",
"bananaflakes",
"--random-seed=123",
"--nohard-abort"
],
"marked_slow": false,
@@ -27,5 +27,5 @@ class TestCase(testcase.TestCase):
def _get_files_params(self, ctx):
return [self.name]
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
@@ -27,5 +27,5 @@ class TestCase(testcase.TestCase):
def _get_files_params(self, ctx):
return [self.name]
def GetSuite(name, root):
return TestSuite(name, root)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)