Commit 5ede3cb5 authored by Michal Majewski, committed by Commit Bot

Reland "[test] Random seed processor"

This is a reland of 0db74d49.

Original change's description:
> [test] Random seed processor
> 
> 1. --total-timeout-sec now available for ./run-tests.py. It can be
> useful with infinite seed stressing
> 2. random seed dropped from the context. Now JSON progress indicator
> gets it from the list of command args.
> 
> Bug: v8:6917
> Cq-Include-Trybots: luci.v8.try:v8_linux_noi18n_rel_ng
> Change-Id: I73e535bc8face9b913c696b8d5e3a246fa231004
> Reviewed-on: https://chromium-review.googlesource.com/888524
> Commit-Queue: Michał Majewski <majeski@google.com>
> Reviewed-by: Michael Achenbach <machenbach@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#50964}

Bug: v8:6917
Change-Id: I1ea376a4abffce5ab65f4834ea7e6d6011765ffa
Cq-Include-Trybots: luci.v8.try:v8_linux_noi18n_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/894204
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michał Majewski <majeski@google.com>
Cr-Commit-Position: refs/heads/master@{#50978}
parent 768c41c8
...@@ -35,9 +35,9 @@ from testrunner.objects import testcase ...@@ -35,9 +35,9 @@ from testrunner.objects import testcase
class TestSuite(testsuite.TestSuite): class TestSuite(testsuite.TestSuite):
def __init__(self, name, root): def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(name, root) super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(root, "data") self.testroot = os.path.join(self.root, "data")
def ListTests(self, context): def ListTests(self, context):
tests = map(self._create_test, [ tests = map(self._create_test, [
...@@ -143,5 +143,5 @@ class SuppressedTimeoutTestCase(TestCase): ...@@ -143,5 +143,5 @@ class SuppressedTimeoutTestCase(TestCase):
self.expected_outcomes = self.expected_outcomes + [statusfile.TIMEOUT] self.expected_outcomes = self.expected_outcomes + [statusfile.TIMEOUT]
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -67,5 +67,5 @@ class TestCase(testcase.TestCase): ...@@ -67,5 +67,5 @@ class TestCase(testcase.TestCase):
return [self.path] return [self.path]
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -78,5 +78,5 @@ class TestCase(testcase.TestCase): ...@@ -78,5 +78,5 @@ class TestCase(testcase.TestCase):
return os.path.join(self.suite.root, self.path + self._get_suffix()) return os.path.join(self.suite.root, self.path + self._get_suffix())
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -60,5 +60,5 @@ class TestCase(testcase.TestCase): ...@@ -60,5 +60,5 @@ class TestCase(testcase.TestCase):
return 'v8_simple_%s_fuzzer' % group return 'v8_simple_%s_fuzzer' % group
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -65,5 +65,5 @@ class TestCase(testcase.TestCase): ...@@ -65,5 +65,5 @@ class TestCase(testcase.TestCase):
os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX) os.path.join(self.suite.root, self.path) + EXPECTED_SUFFIX)
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -82,5 +82,5 @@ class TestCase(testcase.TestCase): ...@@ -82,5 +82,5 @@ class TestCase(testcase.TestCase):
return os.path.join(self.suite.root, self.path + self._get_suffix()) return os.path.join(self.suite.root, self.path + self._get_suffix())
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -103,5 +103,5 @@ class TestCase(testcase.TestCase): ...@@ -103,5 +103,5 @@ class TestCase(testcase.TestCase):
self._expected_fail()) self._expected_fail())
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -179,7 +179,8 @@ class CombinedTest(testcase.TestCase): ...@@ -179,7 +179,8 @@ class CombinedTest(testcase.TestCase):
passed as arguments. passed as arguments.
""" """
def __init__(self, name, tests): def __init__(self, name, tests):
super(CombinedTest, self).__init__(tests[0].suite, '', name) super(CombinedTest, self).__init__(tests[0].suite, '', name,
tests[0]._test_config)
self._tests = tests self._tests = tests
def _prepare_outcomes(self, force_update=True): def _prepare_outcomes(self, force_update=True):
...@@ -195,8 +196,6 @@ class CombinedTest(testcase.TestCase): ...@@ -195,8 +196,6 @@ class CombinedTest(testcase.TestCase):
""" """
shell = 'd8' shell = 'd8'
shell_flags = ['--test', '--disable-abortjs', '--quiet-load'] shell_flags = ['--test', '--disable-abortjs', '--quiet-load']
if ctx.random_seed:
shell_flags.append('--random-seed=%s' % ctx.random_seed)
return shell, shell_flags return shell, shell_flags
def _get_cmd_params(self, ctx): def _get_cmd_params(self, ctx):
...@@ -238,5 +237,5 @@ class SuppressedTestCase(TestCase): ...@@ -238,5 +237,5 @@ class SuppressedTestCase(TestCase):
) )
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -44,5 +44,5 @@ class TestCase(testcase.TestCase): ...@@ -44,5 +44,5 @@ class TestCase(testcase.TestCase):
return mkgrokdump.OutProc(self.expected_outcomes, self.suite.expected_path) return mkgrokdump.OutProc(self.expected_outcomes, self.suite.expected_path)
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -56,9 +56,9 @@ TEST_DIRS = """ ...@@ -56,9 +56,9 @@ TEST_DIRS = """
class TestSuite(testsuite.TestSuite): class TestSuite(testsuite.TestSuite):
def __init__(self, name, root): def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(name, root) super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(root, "data") self.testroot = os.path.join(self.root, "data")
def ListTests(self, context): def ListTests(self, context):
tests = [] tests = []
...@@ -118,5 +118,5 @@ class TestCase(testcase.TestCase): ...@@ -118,5 +118,5 @@ class TestCase(testcase.TestCase):
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -85,8 +85,8 @@ class TestSuite(testsuite.TestSuite): ...@@ -85,8 +85,8 @@ class TestSuite(testsuite.TestSuite):
class TestCase(testcase.TestCase): class TestCase(testcase.TestCase):
def __init__(self, suite, path, name, source, template_flags): def __init__(self, suite, path, name, test_config, source, template_flags):
super(TestCase, self).__init__(suite, path, name) super(TestCase, self).__init__(suite, path, name, test_config)
self._source = source self._source = source
self._template_flags = template_flags self._template_flags = template_flags
...@@ -113,5 +113,5 @@ class TestCase(testcase.TestCase): ...@@ -113,5 +113,5 @@ class TestCase(testcase.TestCase):
return self._source return self._source
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -116,8 +116,8 @@ class TestSuite(testsuite.TestSuite): ...@@ -116,8 +116,8 @@ class TestSuite(testsuite.TestSuite):
# Match the (...) in '/path/to/v8/test/test262/subdir/test/(...).js' # Match the (...) in '/path/to/v8/test/test262/subdir/test/(...).js'
# In practice, subdir is data or local-tests # In practice, subdir is data or local-tests
def __init__(self, name, root): def __init__(self, *args, **kwargs):
super(TestSuite, self).__init__(name, root) super(TestSuite, self).__init__(*args, **kwargs)
self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH) self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH) self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
self.harness = [os.path.join(self.harnesspath, f) self.harness = [os.path.join(self.harnesspath, f)
...@@ -250,5 +250,5 @@ class TestCase(testcase.TestCase): ...@@ -250,5 +250,5 @@ class TestCase(testcase.TestCase):
return test262.NoExceptionOutProc(self.expected_outcomes) return test262.NoExceptionOutProc(self.expected_outcomes)
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -66,7 +66,7 @@ class TestCase(testcase.TestCase): ...@@ -66,7 +66,7 @@ class TestCase(testcase.TestCase):
def _get_suite_flags(self, ctx): def _get_suite_flags(self, ctx):
return ( return (
["--gtest_filter=" + self.path] + ["--gtest_filter=" + self.path] +
["--gtest_random_seed=%s" % ctx.random_seed] + ["--gtest_random_seed=%s" % self.random_seed] +
["--gtest_print_time=0"] ["--gtest_print_time=0"]
) )
...@@ -74,5 +74,5 @@ class TestCase(testcase.TestCase): ...@@ -74,5 +74,5 @@ class TestCase(testcase.TestCase):
return self.suite.name return self.suite.name
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -31,5 +31,5 @@ class TestCase(testcase.TestCase): ...@@ -31,5 +31,5 @@ class TestCase(testcase.TestCase):
return [os.path.join(self.suite.root, self.path + self._get_suffix())] return [os.path.join(self.suite.root, self.path + self._get_suffix())]
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -109,5 +109,5 @@ class TestCase(testcase.TestCase): ...@@ -109,5 +109,5 @@ class TestCase(testcase.TestCase):
os.path.join(self.suite.root, self.path) + '-expected.txt') os.path.join(self.suite.root, self.path) + '-expected.txt')
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -17,10 +17,11 @@ sys.path.insert( ...@@ -17,10 +17,11 @@ sys.path.insert(
os.path.dirname(os.path.abspath(__file__)))) os.path.dirname(os.path.abspath(__file__))))
from local import testsuite from testrunner.local import testsuite
from local import utils from testrunner.local import utils
from testrunner.test_config import TestConfig
from testproc.shard import ShardProc from testrunner.testproc.shard import ShardProc
from testrunner.testproc.timeout import TimeoutProc
BASE_DIR = ( BASE_DIR = (
...@@ -215,7 +216,7 @@ class BaseTestRunner(object): ...@@ -215,7 +216,7 @@ class BaseTestRunner(object):
raise raise
args = self._parse_test_args(args) args = self._parse_test_args(args)
suites = self._get_suites(args, options.verbose) suites = self._get_suites(args, options)
self._setup_env() self._setup_env()
return self._do_execute(suites, args, options) return self._do_execute(suites, args, options)
...@@ -255,6 +256,8 @@ class BaseTestRunner(object): ...@@ -255,6 +256,8 @@ class BaseTestRunner(object):
parser.add_option("--shard-run", parser.add_option("--shard-run",
help="Run this shard from the split up tests.", help="Run this shard from the split up tests.",
default=1, type="int") default=1, type="int")
parser.add_option("--total-timeout-sec", default=0, type="int",
help="How long should fuzzer run")
# TODO(machenbach): Temporary options for rolling out new test runner # TODO(machenbach): Temporary options for rolling out new test runner
# features. # features.
...@@ -480,9 +483,9 @@ class BaseTestRunner(object): ...@@ -480,9 +483,9 @@ class BaseTestRunner(object):
return reduce(list.__add__, map(expand_test_group, args), []) return reduce(list.__add__, map(expand_test_group, args), [])
def _get_suites(self, args, verbose=False): def _get_suites(self, args, options):
names = self._args_to_suite_names(args) names = self._args_to_suite_names(args)
return self._load_suites(names, verbose) return self._load_suites(names, options)
def _args_to_suite_names(self, args): def _args_to_suite_names(self, args):
# Use default tests if no test configuration was provided at the cmd line. # Use default tests if no test configuration was provided at the cmd line.
...@@ -496,14 +499,19 @@ class BaseTestRunner(object): ...@@ -496,14 +499,19 @@ class BaseTestRunner(object):
def _expand_test_group(self, name): def _expand_test_group(self, name):
return TEST_MAP.get(name, [name]) return TEST_MAP.get(name, [name])
def _load_suites(self, names, verbose=False): def _load_suites(self, names, options):
test_config = self._create_test_config(options)
def load_suite(name): def load_suite(name):
if verbose: if options.verbose:
print '>>> Loading test suite: %s' % name print '>>> Loading test suite: %s' % name
return testsuite.TestSuite.LoadTestSuite( return testsuite.TestSuite.LoadTestSuite(
os.path.join(self.basedir, 'test', name)) os.path.join(self.basedir, 'test', name),
test_config)
return map(load_suite, names) return map(load_suite, names)
def _create_test_config(self, options):
return TestConfig(options.random_seed)
# TODO(majeski): remove options & args parameters # TODO(majeski): remove options & args parameters
def _do_execute(self, suites, args, options): def _do_execute(self, suites, args, options):
raise NotImplementedError() raise NotImplementedError()
...@@ -550,3 +558,8 @@ class BaseTestRunner(object): ...@@ -550,3 +558,8 @@ class BaseTestRunner(object):
return 1, 1 return 1, 1
return shard_run, shard_count return shard_run, shard_count
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
...@@ -321,18 +321,17 @@ class JUnitTestProgressIndicator(ProgressIndicator): ...@@ -321,18 +321,17 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator): class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results, arch, mode, random_seed): def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__() super(JsonTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results self.json_test_results = json_test_results
self.arch = arch self.arch = arch
self.mode = mode self.mode = mode
self.random_seed = random_seed
self.results = [] self.results = []
self.tests = [] self.tests = []
def ToProgressIndicatorProc(self): def ToProgressIndicatorProc(self):
return progress_proc.JsonTestProgressIndicator( return progress_proc.JsonTestProgressIndicator(
self.json_test_results, self.arch, self.mode, self.random_seed) self.json_test_results, self.arch, self.mode)
def Done(self): def Done(self):
complete_results = [] complete_results = []
...@@ -380,6 +379,16 @@ class JsonTestProgressIndicator(ProgressIndicator): ...@@ -380,6 +379,16 @@ class JsonTestProgressIndicator(ProgressIndicator):
# will have unexpected_output to be reported here has well. # will have unexpected_output to be reported here has well.
return return
random_seed = None
for i, flag in enumerate(reversed(test.cmd.args)):
if 'random-seed' in flag:
if '=' in flag:
random_seed = flag.split('=')[1]
break
elif i > 0:
random_seed = test.cmd.args[i - 1]
break
self.results.append({ self.results.append({
"name": str(test), "name": str(test),
"flags": test.cmd.args, "flags": test.cmd.args,
...@@ -391,10 +400,7 @@ class JsonTestProgressIndicator(ProgressIndicator): ...@@ -391,10 +400,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.output_proc.get_outcome(output), "result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes, "expected": test.expected_outcomes,
"duration": output.duration, "duration": output.duration,
"random_seed": int(random_seed),
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
"target_name": test.get_shell(), "target_name": test.get_shell(),
"variant": test.variant, "variant": test.variant,
}) })
......
...@@ -100,21 +100,21 @@ class TestCombiner(object): ...@@ -100,21 +100,21 @@ class TestCombiner(object):
class TestSuite(object): class TestSuite(object):
@staticmethod @staticmethod
def LoadTestSuite(root): def LoadTestSuite(root, test_config):
name = root.split(os.path.sep)[-1] name = root.split(os.path.sep)[-1]
f = None f = None
try: try:
(f, pathname, description) = imp.find_module("testcfg", [root]) (f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module(name + "_testcfg", f, pathname, description) module = imp.load_module(name + "_testcfg", f, pathname, description)
return module.GetSuite(name, root) return module.GetSuite(name, root, test_config)
finally: finally:
if f: if f:
f.close() f.close()
def __init__(self, name, root): def __init__(self, name, root, test_config):
# Note: This might be called concurrently from different processes.
self.name = name # string self.name = name # string
self.root = root # string containing path self.root = root # string containing path
self.test_config = test_config
self.tests = None # list of TestCase objects self.tests = None # list of TestCase objects
self.statusfile = None self.statusfile = None
self.suppress_internals = False self.suppress_internals = False
...@@ -242,8 +242,8 @@ class TestSuite(object): ...@@ -242,8 +242,8 @@ class TestSuite(object):
test_class = self._suppressed_test_class() test_class = self._suppressed_test_class()
else: else:
test_class = self._test_class() test_class = self._test_class()
test = test_class(self, path, self._path_to_name(path), **kwargs) return test_class(self, path, self._path_to_name(path), self.test_config,
return test **kwargs)
def _suppressed_test_class(self): def _suppressed_test_class(self):
"""Optional testcase that suppresses assertions. Used by fuzzers that are """Optional testcase that suppresses assertions. Used by fuzzers that are
......
...@@ -25,7 +25,7 @@ from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc ...@@ -25,7 +25,7 @@ from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import ResultsTracker, TestsCounter from testrunner.testproc.progress import ResultsTracker, TestsCounter
from testrunner.testproc.rerun import RerunProc from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.timeout import TimeoutProc from testrunner.utils import random_utils
DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"] DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
...@@ -61,7 +61,7 @@ class NumFuzzer(base_runner.BaseTestRunner): ...@@ -61,7 +61,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
default="mono") default="mono")
parser.add_option("-t", "--timeout", help="Timeout in seconds", parser.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int") default= -1, type="int")
parser.add_option("--random-seed", default=0, parser.add_option("--random-seed", default=0, type=int,
help="Default seed for initializing random generator") help="Default seed for initializing random generator")
parser.add_option("--fuzzer-random-seed", default=0, parser.add_option("--fuzzer-random-seed", default=0,
help="Default seed for initializing fuzzer random " help="Default seed for initializing fuzzer random "
...@@ -82,9 +82,6 @@ class NumFuzzer(base_runner.BaseTestRunner): ...@@ -82,9 +82,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
"value 0 to provide infinite number of subtests. " "value 0 to provide infinite number of subtests. "
"When --combine-tests is set it indicates how many " "When --combine-tests is set it indicates how many "
"tests to create in total") "tests to create in total")
parser.add_option("--total-timeout-sec", default=0, type="int",
help="How long should fuzzer run. It overrides "
"--tests-count")
# Stress gc # Stress gc
parser.add_option("--stress-marking", default=0, type="int", parser.add_option("--stress-marking", default=0, type="int",
...@@ -130,12 +127,8 @@ class NumFuzzer(base_runner.BaseTestRunner): ...@@ -130,12 +127,8 @@ class NumFuzzer(base_runner.BaseTestRunner):
options.extra_flags = shlex.split(options.extra_flags) options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0: if options.j == 0:
options.j = multiprocessing.cpu_count() options.j = multiprocessing.cpu_count()
while options.random_seed == 0: if not options.fuzzer_random_seed:
options.random_seed = random.SystemRandom().randint(-2147483648, options.fuzzer_random_seed = random_utils.random_seed()
2147483647)
while options.fuzzer_random_seed == 0:
options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
2147483647)
if options.total_timeout_sec: if options.total_timeout_sec:
options.tests_count = 0 options.tests_count = 0
...@@ -165,8 +158,7 @@ class NumFuzzer(base_runner.BaseTestRunner): ...@@ -165,8 +158,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
progress_indicator.Register(progress.JsonTestProgressIndicator( progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results, options.json_test_results,
self.build_config.arch, self.build_config.arch,
self.mode_options.execution_mode, self.mode_options.execution_mode))
ctx.random_seed))
loader = LoadProc() loader = LoadProc()
fuzzer_rng = random.Random(options.fuzzer_random_seed) fuzzer_rng = random.Random(options.fuzzer_random_seed)
...@@ -234,7 +226,6 @@ class NumFuzzer(base_runner.BaseTestRunner): ...@@ -234,7 +226,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
options.command_prefix, options.command_prefix,
options.extra_flags, options.extra_flags,
False, # Keep i18n on by default. False, # Keep i18n on by default.
options.random_seed,
True, # No sorting of test cases. True, # No sorting of test cases.
options.rerun_failures_count, options.rerun_failures_count,
options.rerun_failures_max, options.rerun_failures_max,
...@@ -336,11 +327,6 @@ class NumFuzzer(base_runner.BaseTestRunner): ...@@ -336,11 +327,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
add('deopt', options.stress_deopt, options.stress_deopt_min) add('deopt', options.stress_deopt, options.stress_deopt_min)
return fuzzers return fuzzers
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
return TimeoutProc(options.total_timeout_sec)
def _create_rerun_proc(self, options): def _create_rerun_proc(self, options):
if not options.rerun_failures_count: if not options.rerun_failures_count:
return None return None
......
...@@ -28,24 +28,27 @@ ...@@ -28,24 +28,27 @@
class Context(): class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout, def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed, isolates, command_prefix, extra_flags, noi18n, no_sorting,
no_sorting, rerun_failures_count, rerun_failures_max, no_harness, rerun_failures_count, rerun_failures_max, no_harness,
use_perf_data, sancov_dir, infra_staging=False): use_perf_data, sancov_dir):
# Used by perfdata
self.arch = arch self.arch = arch
self.mode = mode self.mode = mode
self.shell_dir = shell_dir self.no_sorting = no_sorting
self.mode_flags = mode_flags self.use_perf_data = use_perf_data
self.verbose = verbose
self.timeout = timeout # Used by testcase to create command
self.isolates = isolates
self.command_prefix = command_prefix self.command_prefix = command_prefix
self.extra_flags = extra_flags self.extra_flags = extra_flags
self.isolates = isolates
self.mode_flags = mode_flags
self.no_harness = no_harness
self.noi18n = noi18n self.noi18n = noi18n
self.random_seed = random_seed self.shell_dir = shell_dir
self.no_sorting = no_sorting self.timeout = timeout
self.verbose = verbose
# Will be deprecated after moving to test processors
self.rerun_failures_count = rerun_failures_count self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max self.rerun_failures_max = rerun_failures_max
self.no_harness = no_harness
self.use_perf_data = use_perf_data
self.sancov_dir = sancov_dir self.sancov_dir = sancov_dir
self.infra_staging = infra_staging
...@@ -40,7 +40,7 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)") ...@@ -40,7 +40,7 @@ FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
class TestCase(object): class TestCase(object):
def __init__(self, suite, path, name): def __init__(self, suite, path, name, test_config):
self.suite = suite # TestSuite object self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo' self.path = path # string, e.g. 'div-mod', 'test-api/foo'
...@@ -59,18 +59,26 @@ class TestCase(object): ...@@ -59,18 +59,26 @@ class TestCase(object):
self.procid = '%s/%s' % (self.suite.name, self.name) # unique id self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
self.keep_output = False # Can output of this test be dropped self.keep_output = False # Can output of this test be dropped
# Test config contains information needed to build the command.
# TODO(majeski): right now it contains only random seed.
self._test_config = test_config
# Overrides default random seed from test_config if specified.
self._random_seed = None
self._statusfile_outcomes = None self._statusfile_outcomes = None
self._expected_outcomes = None # optimization: None == [statusfile.PASS] self.expected_outcomes = None
self._statusfile_flags = None self._statusfile_flags = None
self._prepare_outcomes() self._prepare_outcomes()
def create_subtest(self, processor, subtest_id, variant=None, flags=None, def create_subtest(self, processor, subtest_id, variant=None, flags=None,
keep_output=False): keep_output=False, random_seed=None):
subtest = copy.copy(self) subtest = copy.copy(self)
subtest.origin = self subtest.origin = self
subtest.processor = processor subtest.processor = processor
subtest.procid += '.%s' % subtest_id subtest.procid += '.%s' % subtest_id
subtest.keep_output |= keep_output subtest.keep_output |= keep_output
if random_seed:
subtest._random_seed = random_seed
if flags: if flags:
subtest.variant_flags = subtest.variant_flags + flags subtest.variant_flags = subtest.variant_flags + flags
if variant is not None: if variant is not None:
...@@ -79,7 +87,8 @@ class TestCase(object): ...@@ -79,7 +87,8 @@ class TestCase(object):
subtest._prepare_outcomes() subtest._prepare_outcomes()
return subtest return subtest
def create_variant(self, variant, flags, procid_suffix=None): def create_variant(self, variant, flags, procid_suffix=None,
random_seed=None):
"""Makes a shallow copy of the object and updates variant, variant flags and """Makes a shallow copy of the object and updates variant, variant flags and
all fields that depend on it, e.g. expected outcomes. all fields that depend on it, e.g. expected outcomes.
...@@ -88,6 +97,8 @@ class TestCase(object): ...@@ -88,6 +97,8 @@ class TestCase(object):
flags - flags that should be added to origin test's variant flags flags - flags that should be added to origin test's variant flags
procid_suffix - for multiple variants with the same name set suffix to procid_suffix - for multiple variants with the same name set suffix to
keep procid unique. keep procid unique.
random_seed - random seed to use in this variant. None means use base
test's random seed.
""" """
other = copy.copy(self) other = copy.copy(self)
if not self.variant_flags: if not self.variant_flags:
...@@ -100,6 +111,9 @@ class TestCase(object): ...@@ -100,6 +111,9 @@ class TestCase(object):
else: else:
other.procid += '[%s]' % variant other.procid += '[%s]' % variant
if random_seed:
other._random_seed = random_seed
other._prepare_outcomes(variant != self.variant) other._prepare_outcomes(variant != self.variant)
return other return other
...@@ -171,6 +185,7 @@ class TestCase(object): ...@@ -171,6 +185,7 @@ class TestCase(object):
def _get_cmd_params(self, ctx): def _get_cmd_params(self, ctx):
"""Gets command parameters and combines them in the following order: """Gets command parameters and combines them in the following order:
- files [empty by default] - files [empty by default]
- random seed
- extra flags (from command line) - extra flags (from command line)
- user flags (variant/fuzzer flags) - user flags (variant/fuzzer flags)
- statusfile flags - statusfile flags
...@@ -182,6 +197,7 @@ class TestCase(object): ...@@ -182,6 +197,7 @@ class TestCase(object):
""" """
return ( return (
self._get_files_params(ctx) + self._get_files_params(ctx) +
self._get_random_seed_flags() +
self._get_extra_flags(ctx) + self._get_extra_flags(ctx) +
self._get_variant_flags() + self._get_variant_flags() +
self._get_statusfile_flags() + self._get_statusfile_flags() +
...@@ -196,6 +212,13 @@ class TestCase(object): ...@@ -196,6 +212,13 @@ class TestCase(object):
def _get_files_params(self, ctx): def _get_files_params(self, ctx):
return [] return []
def _get_random_seed_flags(self):
return ['--random-seed=%d' % self.random_seed]
@property
def random_seed(self):
return self._random_seed or self._test_config.random_seed
def _get_extra_flags(self, ctx): def _get_extra_flags(self, ctx):
return ctx.extra_flags return ctx.extra_flags
...@@ -225,8 +248,6 @@ class TestCase(object): ...@@ -225,8 +248,6 @@ class TestCase(object):
shell_flags.append('--test') shell_flags.append('--test')
if utils.IsWindows(): if utils.IsWindows():
shell += '.exe' shell += '.exe'
if ctx.random_seed:
shell_flags.append('--random-seed=%s' % ctx.random_seed)
return shell, shell_flags return shell, shell_flags
def _get_timeout(self, params, timeout): def _get_timeout(self, params, timeout):
......
...@@ -34,7 +34,9 @@ from testrunner.testproc.progress import (VerboseProgressIndicator, ...@@ -34,7 +34,9 @@ from testrunner.testproc.progress import (VerboseProgressIndicator,
ResultsTracker, ResultsTracker,
TestsCounter) TestsCounter)
from testrunner.testproc.rerun import RerunProc from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.seed import SeedProc
from testrunner.testproc.variant import VariantProc from testrunner.testproc.variant import VariantProc
from testrunner.utils import random_utils
TIMEOUT_DEFAULT = 60 TIMEOUT_DEFAULT = 60
...@@ -220,7 +222,9 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -220,7 +222,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
type=int) type=int)
parser.add_option("--random-seed-stress-count", default=1, type="int", parser.add_option("--random-seed-stress-count", default=1, type="int",
dest="random_seed_stress_count", dest="random_seed_stress_count",
help="Number of runs with different random seeds") help="Number of runs with different random seeds. Only "
"with test processors: 0 means infinite "
"generation.")
def _use_staging(self, options): def _use_staging(self, options):
if options.infra_staging is not None: if options.infra_staging is not None:
...@@ -302,9 +306,6 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -302,9 +306,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if options.j == 0: if options.j == 0:
options.j = multiprocessing.cpu_count() options.j = multiprocessing.cpu_count()
if options.random_seed_stress_count <= 1 and options.random_seed == 0:
options.random_seed = self._random_seed()
if options.variants == "infra_staging": if options.variants == "infra_staging":
options.variants = "exhaustive" options.variants = "exhaustive"
options.infra_staging = True options.infra_staging = True
...@@ -356,12 +357,6 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -356,12 +357,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
"allow_user_segv_handler=1", "allow_user_segv_handler=1",
]) ])
def _random_seed(self):
seed = 0
while not seed:
seed = random.SystemRandom().randint(-2147483648, 2147483647)
return seed
def _execute(self, args, options, suites): def _execute(self, args, options, suites):
print(">>> Running tests for %s.%s" % (self.build_config.arch, print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name)) self.mode_name))
...@@ -387,14 +382,12 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -387,14 +382,12 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.command_prefix, options.command_prefix,
options.extra_flags, options.extra_flags,
self.build_config.no_i18n, self.build_config.no_i18n,
options.random_seed,
options.no_sorting, options.no_sorting,
options.rerun_failures_count, options.rerun_failures_count,
options.rerun_failures_max, options.rerun_failures_max,
options.no_harness, options.no_harness,
use_perf_data=not options.swarming, use_perf_data=not options.swarming,
sancov_dir=self.sancov_dir, sancov_dir=self.sancov_dir)
infra_staging=options.infra_staging)
# TODO(all): Combine "simulator" and "simulator_run". # TODO(all): Combine "simulator" and "simulator_run".
# TODO(machenbach): In GN we can derive simulator run from # TODO(machenbach): In GN we can derive simulator run from
...@@ -441,8 +434,7 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -441,8 +434,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
progress_indicator.Register(progress.JsonTestProgressIndicator( progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results, options.json_test_results,
self.build_config.arch, self.build_config.arch,
self.mode_options.execution_mode, self.mode_options.execution_mode))
ctx.random_seed))
if options.flakiness_results: # pragma: no cover if options.flakiness_results: # pragma: no cover
progress_indicator.Register(progress.FlakinessTestProgressIndicator( progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results)) options.flakiness_results))
...@@ -480,23 +472,20 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -480,23 +472,20 @@ class StandardTestRunner(base_runner.BaseTestRunner):
for v in variant_gen.FilterVariantsByTest(t) for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ] for flags in variant_gen.GetFlagSets(t, v) ]
if options.random_seed_stress_count > 1: # Duplicate test for random seed stress mode.
# Duplicate test for random seed stress mode. def iter_seed_flags():
def iter_seed_flags(): for _ in range(0, options.random_seed_stress_count or 1):
for _ in range(0, options.random_seed_stress_count): # Use given random seed for all runs (set by default in
# Use given random seed for all runs (set by default in # execution.py) or a new random seed if none is specified.
# execution.py) or a new random seed if none is specified. if options.random_seed:
if options.random_seed: yield options.random_seed
yield [] else:
else: yield random_utils.random_seed()
yield ["--random-seed=%d" % self._random_seed()] s.tests = [
s.tests = [ t.create_variant(t.variant, [], 'seed-%d' % n, random_seed=val)
t.create_variant(t.variant, flags, 'seed-stress-%d' % n) for t in variant_tests
for t in variant_tests for n, val in enumerate(iter_seed_flags())
for n, flags in enumerate(iter_seed_flags()) ]
]
else:
s.tests = variant_tests
# Second filtering by status applying also the variant-dependent rules. # Second filtering by status applying also the variant-dependent rules.
if options.warn_unused: if options.warn_unused:
...@@ -592,8 +581,10 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -592,8 +581,10 @@ class StandardTestRunner(base_runner.BaseTestRunner):
tests_counter, tests_counter,
VariantProc(self._variants), VariantProc(self._variants),
StatusFileFilterProc(options.slow_tests, options.pass_fail_tests), StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
self._create_seed_proc(options),
] + indicators + [ ] + indicators + [
results, results,
self._create_timeout_proc(options),
self._create_rerun_proc(context), self._create_rerun_proc(context),
execproc, execproc,
] ]
...@@ -633,6 +624,11 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -633,6 +624,11 @@ class StandardTestRunner(base_runner.BaseTestRunner):
exit_code = 0 exit_code = 0
return exit_code return exit_code
def _create_seed_proc(self, options):
if options.random_seed_stress_count == 1 and options.random_seed:
return None
return SeedProc(options.random_seed_stress_count, options.random_seed)
def _create_rerun_proc(self, ctx): def _create_rerun_proc(self, ctx):
if not ctx.rerun_failures_count: if not ctx.rerun_failures_count:
return None return None
......
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
# TODO(majeski): Move the rest of stuff from context
class TestConfig(object):
  """Configuration for a test run.

  Holds the random seed used by the run; other context fields are to be
  moved here (see TODO above).
  """

  def __init__(self, random_seed):
    # Guarantee self.random_seed is always truthy: fall back to a freshly
    # generated seed when the caller passes None or 0.
    self.random_seed = random_seed or self._gen_random_seed()

  def _gen_random_seed(self):
    """Return a random, non-zero seed in the signed 32-bit range."""
    rng = random.SystemRandom()
    while True:
      candidate = rng.randint(-2147483648, 2147483647)
      if candidate:
        return candidate
...@@ -292,7 +292,7 @@ class JUnitTestProgressIndicator(ProgressIndicator): ...@@ -292,7 +292,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
class JsonTestProgressIndicator(ProgressIndicator): class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results, arch, mode, random_seed): def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__() super(JsonTestProgressIndicator, self).__init__()
# We want to drop stdout/err for all passed tests on the first try, but we # We want to drop stdout/err for all passed tests on the first try, but we
# need to get outputs for all runs after the first one. To accommodate that, # need to get outputs for all runs after the first one. To accommodate that,
...@@ -303,7 +303,6 @@ class JsonTestProgressIndicator(ProgressIndicator): ...@@ -303,7 +303,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.json_test_results = json_test_results self.json_test_results = json_test_results
self.arch = arch self.arch = arch
self.mode = mode self.mode = mode
self.random_seed = random_seed
self.results = [] self.results = []
self.tests = [] self.tests = []
...@@ -338,10 +337,7 @@ class JsonTestProgressIndicator(ProgressIndicator): ...@@ -338,10 +337,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"result": test.output_proc.get_outcome(output), "result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes, "expected": test.expected_outcomes,
"duration": output.duration, "duration": output.duration,
"random_seed": test.random_seed,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
"target_name": test.get_shell(), "target_name": test.get_shell(),
"variant": test.variant, "variant": test.variant,
}) })
......
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
from collections import defaultdict
from . import base
from ..utils import random_utils
class SeedProc(base.TestProcProducer):
  """Producer that turns each incoming test into seeded subtests."""

  def __init__(self, count, seed=None):
    """
    Args:
      count: How many subtests with different seeds to create for each test.
        0 means infinite.
      seed: seed to use. None means random seed for each subtest.
    """
    super(SeedProc, self).__init__('Seed')
    self._count = count
    self._seed = seed
    # Number of seeded subtests already sent, keyed by the test's procid.
    self._last_idx = defaultdict(int)

  def setup(self, requirement=base.DROP_RESULT):
    super(SeedProc, self).setup(requirement)
    # SeedProc is optimized for dropping the result.
    assert requirement == base.DROP_RESULT

  def _next_test(self, test):
    self._try_send_next_test(test)

  def _result_for(self, test, subtest, result):
    self._try_send_next_test(test)

  def _try_send_next_test(self, test):
    idx = self._last_idx[test.procid]
    if self._count and idx >= self._count:
      # Every requested subtest was produced; report the test as done.
      del self._last_idx[test.procid]
      self._send_result(test, None)
      return
    idx += 1
    # Either the fixed seed given at construction time or a fresh random
    # one per subtest.
    seed = self._seed or random_utils.random_seed()
    self._send_test(self._create_subtest(test, idx, random_seed=seed))
    self._last_idx[test.procid] = idx
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
def random_seed():
  """Returns random, non-zero seed."""
  rng = random.SystemRandom()
  while True:
    # Signed 32-bit range; retry on the (rare) zero draw.
    seed = rng.randint(-2147483648, 2147483647)
    if seed:
      return seed
...@@ -305,6 +305,7 @@ class SystemTest(unittest.TestCase): ...@@ -305,6 +305,7 @@ class SystemTest(unittest.TestCase):
# flags field of the test result. # flags field of the test result.
# After recent changes we report all flags, including the file names. # After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation. # This is redundant to the command. Needs investigation.
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results1.json', json_path) self.check_cleaned_json_output('expected_test_results1.json', json_path)
def testFlakeWithRerunAndJSONProc(self): def testFlakeWithRerunAndJSONProc(self):
...@@ -336,6 +337,7 @@ class SystemTest(unittest.TestCase): ...@@ -336,6 +337,7 @@ class SystemTest(unittest.TestCase):
'Done running sweet/bananaflakes: pass', result.stdout, result) 'Done running sweet/bananaflakes: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result) self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results2.json', json_path) self.check_cleaned_json_output('expected_test_results2.json', json_path)
def testAutoDetect(self): def testAutoDetect(self):
...@@ -549,7 +551,10 @@ class SystemTest(unittest.TestCase): ...@@ -549,7 +551,10 @@ class SystemTest(unittest.TestCase):
# timeout was used. # timeout was used.
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithDefault(self): def testRandomSeedStressWithDefaultProc(self):
self.testRandomSeedStressWithDefault(infra_staging=True)
def testRandomSeedStressWithDefault(self, infra_staging=False):
"""Test using random-seed-stress feature has the right number of tests.""" """Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir: with temp_base() as basedir:
result = run_tests( result = run_tests(
...@@ -559,8 +564,13 @@ class SystemTest(unittest.TestCase): ...@@ -559,8 +564,13 @@ class SystemTest(unittest.TestCase):
'--variants=default', '--variants=default',
'--random-seed-stress-count=2', '--random-seed-stress-count=2',
'sweet/bananas', 'sweet/bananas',
infra_staging=infra_staging,
) )
self.assertIn('Running 2 tests', result.stdout, result) if infra_staging:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
else:
self.assertIn('Running 2 tests', result.stdout, result)
self.assertEqual(0, result.returncode, result) self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self): def testRandomSeedStressWithSeed(self):
......
...@@ -4,15 +4,15 @@ ...@@ -4,15 +4,15 @@
"mode": "release", "mode": "release",
"results": [ "results": [
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"exit_code": 1, "exit_code": 1,
"expected": [ "expected": [
"PASS" "PASS"
], ],
"flags": [ "flags": [
"--random-seed=123",
"strawberries", "strawberries",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"name": "sweet/strawberries", "name": "sweet/strawberries",
...@@ -20,20 +20,20 @@ ...@@ -20,20 +20,20 @@
"result": "FAIL", "result": "FAIL",
"run": 1, "run": 1,
"stderr": "", "stderr": "",
"stdout": "--random-seed=123 strawberries --nohard-abort\n", "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py", "target_name": "d8_mocked.py",
"variant": "default" "variant": "default"
}, },
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"exit_code": 1, "exit_code": 1,
"expected": [ "expected": [
"PASS" "PASS"
], ],
"flags": [ "flags": [
"--random-seed=123",
"strawberries", "strawberries",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"name": "sweet/strawberries", "name": "sweet/strawberries",
...@@ -41,20 +41,20 @@ ...@@ -41,20 +41,20 @@
"result": "FAIL", "result": "FAIL",
"run": 2, "run": 2,
"stderr": "", "stderr": "",
"stdout": "--random-seed=123 strawberries --nohard-abort\n", "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py", "target_name": "d8_mocked.py",
"variant": "default" "variant": "default"
}, },
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"exit_code": 1, "exit_code": 1,
"expected": [ "expected": [
"PASS" "PASS"
], ],
"flags": [ "flags": [
"--random-seed=123",
"strawberries", "strawberries",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"name": "sweet/strawberries", "name": "sweet/strawberries",
...@@ -62,40 +62,40 @@ ...@@ -62,40 +62,40 @@
"result": "FAIL", "result": "FAIL",
"run": 3, "run": 3,
"stderr": "", "stderr": "",
"stdout": "--random-seed=123 strawberries --nohard-abort\n", "stdout": "strawberries --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py", "target_name": "d8_mocked.py",
"variant": "default" "variant": "default"
} }
], ],
"slowest_tests": [ "slowest_tests": [
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"flags": [ "flags": [
"--random-seed=123",
"strawberries", "strawberries",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"marked_slow": true, "marked_slow": true,
"name": "sweet/strawberries" "name": "sweet/strawberries"
}, },
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"flags": [ "flags": [
"--random-seed=123",
"strawberries", "strawberries",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"marked_slow": true, "marked_slow": true,
"name": "sweet/strawberries" "name": "sweet/strawberries"
}, },
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py strawberries --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"flags": [ "flags": [
"--random-seed=123",
"strawberries", "strawberries",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"marked_slow": true, "marked_slow": true,
......
...@@ -4,15 +4,15 @@ ...@@ -4,15 +4,15 @@
"mode": "release", "mode": "release",
"results": [ "results": [
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"exit_code": 1, "exit_code": 1,
"expected": [ "expected": [
"PASS" "PASS"
], ],
"flags": [ "flags": [
"--random-seed=123",
"bananaflakes", "bananaflakes",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"name": "sweet/bananaflakes", "name": "sweet/bananaflakes",
...@@ -20,20 +20,20 @@ ...@@ -20,20 +20,20 @@
"result": "FAIL", "result": "FAIL",
"run": 1, "run": 1,
"stderr": "", "stderr": "",
"stdout": "--random-seed=123 bananaflakes --nohard-abort\n", "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py", "target_name": "d8_mocked.py",
"variant": "default" "variant": "default"
}, },
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"exit_code": 0, "exit_code": 0,
"expected": [ "expected": [
"PASS" "PASS"
], ],
"flags": [ "flags": [
"--random-seed=123",
"bananaflakes", "bananaflakes",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"name": "sweet/bananaflakes", "name": "sweet/bananaflakes",
...@@ -41,29 +41,29 @@ ...@@ -41,29 +41,29 @@
"result": "PASS", "result": "PASS",
"run": 2, "run": 2,
"stderr": "", "stderr": "",
"stdout": "--random-seed=123 bananaflakes --nohard-abort\n", "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
"target_name": "d8_mocked.py", "target_name": "d8_mocked.py",
"variant": "default" "variant": "default"
} }
], ],
"slowest_tests": [ "slowest_tests": [
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"flags": [ "flags": [
"--random-seed=123",
"bananaflakes", "bananaflakes",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"marked_slow": false, "marked_slow": false,
"name": "sweet/bananaflakes" "name": "sweet/bananaflakes"
}, },
{ {
"command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
"duration": 1, "duration": 1,
"flags": [ "flags": [
"--random-seed=123",
"bananaflakes", "bananaflakes",
"--random-seed=123",
"--nohard-abort" "--nohard-abort"
], ],
"marked_slow": false, "marked_slow": false,
......
...@@ -27,5 +27,5 @@ class TestCase(testcase.TestCase): ...@@ -27,5 +27,5 @@ class TestCase(testcase.TestCase):
def _get_files_params(self, ctx): def _get_files_params(self, ctx):
return [self.name] return [self.name]
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
...@@ -27,5 +27,5 @@ class TestCase(testcase.TestCase): ...@@ -27,5 +27,5 @@ class TestCase(testcase.TestCase):
def _get_files_params(self, ctx): def _get_files_params(self, ctx):
return [self.name] return [self.name]
def GetSuite(name, root): def GetSuite(*args, **kwargs):
return TestSuite(name, root) return TestSuite(*args, **kwargs)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment