Commit 61c562b0 authored by Michal Majewski, committed by Commit Bot

[test] Implement gc fuzzer with test processors

Bug: v8:6917
Change-Id: I2a7ecc6897c8ccd6ed862cf2b0b484673ee359f6
Reviewed-on: https://chromium-review.googlesource.com/871310
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50728}
parent eda12599
@@ -9,6 +9,7 @@
     'files': [
       'run-deopt-fuzzer.py',
       'run-gc-fuzzer.py',
+      'run-num-fuzzer.py',
     ],
   },
   'includes': [

#!/usr/bin/env python
#
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

from testrunner import num_fuzzer

if __name__ == "__main__":
  sys.exit(num_fuzzer.NumFuzzer().execute())

#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import random
import shlex
import sys
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
from testrunner.local import progress
from testrunner.local import utils
from testrunner.objects import context
from testrunner.testproc import fuzzer
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import ResultsTracker
DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
TIMEOUT_DEFAULT = 60
# Double the timeout for these:
SLOW_ARCHS = ["arm", "mipsel"]
class NumFuzzer(base_runner.BaseTestRunner):
def __init__(self, *args, **kwargs):
super(NumFuzzer, self).__init__(*args, **kwargs)
def _add_parser_options(self, parser):
parser.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
parser.add_option("--dump-results-file", help="Dump maximum limit reached")
parser.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
parser.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
parser.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
parser.add_option("-t", "--timeout", help="Timeout in seconds",
default=-1, type="int")
parser.add_option("--random-seed", default=0,
help="Default seed for initializing random generator")
parser.add_option("--fuzzer-random-seed", default=0,
help="Default seed for initializing fuzzer random "
"generator")
parser.add_option("--stress-marking", default=0, type="int",
help="probability [0-10] of adding --stress-marking "
"flag to the test")
parser.add_option("--stress-scavenge", default=0, type="int",
help="probability [0-10] of adding --stress-scavenge "
"flag to the test")
parser.add_option("--stress-compaction", default=0, type="int",
help="probability [0-10] of adding --stress-compaction "
"flag to the test")
parser.add_option("--stress-gc", default=0, type="int",
help="probability [0-10] of adding --random-gc-interval "
"flag to the test")
parser.add_option("--tests-count", default=5, type="int",
help="Number of tests to generate from each base test")
return parser
def _process_options(self, options):
# Special processing of other options, sorted alphabetically.
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0:
options.j = multiprocessing.cpu_count()
    # 0 means "not set"; re-roll when the RNG happens to return 0.
    while options.random_seed == 0:
      options.random_seed = random.SystemRandom().randint(-2147483648,
                                                          2147483647)
while options.fuzzer_random_seed == 0:
options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
2147483647)
return True
def _get_default_suite_names(self):
return DEFAULT_SUITES
def _do_execute(self, suites, args, options):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
ctx = self._create_context(options)
tests = self._load_tests(options, suites, ctx)
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
loader = LoadProc()
fuzzer_rng = random.Random(options.fuzzer_random_seed)
fuzzer_proc = fuzzer.FuzzerProc(
fuzzer_rng,
options.tests_count,
self._create_fuzzer_configs(options),
)
results = ResultsTracker()
execproc = ExecutionProc(options.j, ctx)
indicator = progress_indicator.ToProgressIndicatorProc()
procs = [
loader,
NameFilterProc(args) if args else None,
StatusFileFilterProc(None, None),
self._create_shard_proc(options),
fuzzer_proc,
results,
indicator,
execproc,
]
self._prepare_procs(procs)
loader.load_tests(tests)
execproc.start()
indicator.finished()
    print('>>> %d tests ran' % results.total)
    if results.failed:
      print('>>> %d tests failed' % results.failed)

if results.failed:
return 1
if results.remaining:
return 2
return 0
def _create_context(self, options):
# Populate context object.
timeout = options.timeout
if timeout == -1:
# Simulators are slow, therefore allow a longer default timeout.
if self.build_config.arch in SLOW_ARCHS:
        timeout = 2 * TIMEOUT_DEFAULT
      else:
        timeout = TIMEOUT_DEFAULT
timeout *= self.mode_options.timeout_scalefactor
ctx = context.Context(self.build_config.arch,
self.mode_options.execution_mode,
self.outdir,
self.mode_options.flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
options.extra_flags,
False, # Keep i18n on by default.
options.random_seed,
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
False, # No no_harness mode.
False, # Don't use perf data.
False) # Coverage not supported.
return ctx
def _load_tests(self, options, suites, ctx):
# Find available test suites and read test cases from them.
variables = {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
"byteorder": sys.byteorder,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
"gc_fuzzer": True,
"gc_stress": True,
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"mode": self.mode_options.status_mode,
"msan": self.build_config.msan,
"no_harness": False,
"no_i18n": self.build_config.no_i18n,
"no_snap": self.build_config.no_snap,
"novfp3": False,
"predictable": self.build_config.predictable,
"simulator": utils.UseSimulator(self.build_config.arch),
"simulator_run": False,
"system": utils.GuessOS(),
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
}
tests = []
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
tests += s.tests
return tests
def _prepare_procs(self, procs):
procs = filter(None, procs)
for i in xrange(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
def _create_fuzzer_configs(self, options):
fuzzers = []
if options.stress_compaction:
fuzzers.append(fuzzer.create_compaction_config(options.stress_compaction))
if options.stress_marking:
fuzzers.append(fuzzer.create_marking_config(options.stress_marking))
if options.stress_scavenge:
fuzzers.append(fuzzer.create_scavenge_config(options.stress_scavenge))
if options.stress_gc:
fuzzers.append(fuzzer.create_gc_interval_config(options.stress_gc))
return fuzzers
if __name__ == '__main__':
sys.exit(NumFuzzer().execute())
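
For illustration, a run with --stress-marking=5 --stress-compaction=2 would make _create_fuzzer_configs build the following list (a sketch, not part of the commit; the option values are examples):

from testrunner.testproc import fuzzer

configs = [
    fuzzer.create_compaction_config(2),  # no analyzer, needs no analysis run
    fuzzer.create_marking_config(5),     # analyzer parses the marking limit
]
# FuzzerProc picks one config per generated subtest as the "main" fuzzer,
# weighted by probability, and mixes in each other config with chance p/10.
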
@@ -74,8 +74,9 @@ class TestCase(object):
     if variant is not None:
       assert self.variant is None
       subtest.variant = variant
-      subtest.variant_flags = flags
       subtest._prepare_outcomes()
+    if flags:
+      subtest.variant_flags = subtest.variant_flags + flags
     return subtest

   def create_variant(self, variant, flags, procid_suffix=None):

@@ -119,7 +119,8 @@ class TestProc(object):
   def _send_result(self, test, result):
     """Helper method for sending result to the previous processor."""
-    result = self._reduce_result(result)
+    if not test.keep_output:
+      result = self._reduce_result(result)
     self._prev_proc.result_for(test, result)

# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import base
class FuzzerConfig(object):
def __init__(self, probability, analyzer, fuzzer):
"""
Args:
    probability: probability of choosing this fuzzer, in the range (0; 10]
analyzer: instance of Analyzer class, can be None if no analysis is needed
fuzzer: instance of Fuzzer class
"""
assert probability > 0 and probability <= 10
self.probability = probability
self.analyzer = analyzer
self.fuzzer = fuzzer
class Analyzer(object):
def get_analysis_flags(self):
raise NotImplementedError()
def do_analysis(self, result):
raise NotImplementedError()
class Fuzzer(object):
def create_flags_generator(self, test, analysis_value):
raise NotImplementedError()
# TODO(majeski): Allow multiple subtests to run at once.
class FuzzerProc(base.TestProcProducer):
def __init__(self, rng, count, fuzzers):
"""
Args:
rng: random number generator used to select flags and values for them
count: number of tests to generate based on each base test
fuzzers: list of FuzzerConfig instances
"""
super(FuzzerProc, self).__init__('Fuzzer')
self._rng = rng
self._count = count
self._fuzzer_configs = fuzzers
self._gens = {}
def setup(self, requirement=base.DROP_RESULT):
# Fuzzer is optimized to not store the results
assert requirement == base.DROP_RESULT
super(FuzzerProc, self).setup(requirement)
def _next_test(self, test):
analysis_flags = []
for fuzzer_config in self._fuzzer_configs:
if fuzzer_config.analyzer:
analysis_flags += fuzzer_config.analyzer.get_analysis_flags()
if analysis_flags:
analysis_flags = list(set(analysis_flags))
subtest = self._create_subtest(test, 'analysis', flags=analysis_flags,
keep_output=True)
self._send_test(subtest)
return
self._gens[test.procid] = self._create_gen(test)
self._try_send_next_test(test)
def _result_for(self, test, subtest, result):
if result is not None:
      # This is the result of the analysis phase; it only seeds the flag
      # generators, the result itself is dropped.
if result.has_unexpected_output:
self._send_result(test, None)
return
self._gens[test.procid] = self._create_gen(test, result)
self._try_send_next_test(test)
def _create_gen(self, test, analysis_result=None):
    # This is called with analysis_result=None only when there is no analysis
    # phase at all, i.e. none of the fuzzers has its own analyzer.
gens = []
indexes = []
    for fuzzer_config in self._fuzzer_configs:
analysis_value = None
if fuzzer_config.analyzer:
analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
if not analysis_value:
# Skip fuzzer for this test since it doesn't have analysis data
continue
p = fuzzer_config.probability
flag_gen = fuzzer_config.fuzzer.create_flags_generator(test,
analysis_value)
indexes += [len(gens)] * p
gens.append((p, flag_gen))
if not gens:
# No fuzzers for this test, skip it
return
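    # Example: with two configs of probabilities 3 and 1, 'indexes' is
    # [0, 0, 0, 1], so the first config becomes the main fuzzer for roughly
    # 3/4 of the subtests; every non-main config still contributes its flags
    # with probability p/10 (the randint check below).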
for i in xrange(0, self._count):
main_index = self._rng.choice(indexes)
_, main_gen = gens[main_index]
flags = next(main_gen)
for index, (p, gen) in enumerate(gens):
if index == main_index:
continue
if self._rng.randint(1, 10) <= p:
flags += next(gen)
flags.append('--fuzzer-random-seed=%s' % self._next_seed())
yield self._create_subtest(test, str(i), flags=flags)
  def _try_send_next_test(self, test):
    # Send at most one subtest at a time; the next one is requested from
    # _result_for once the current one finishes.
    for subtest in self._gens[test.procid]:
      self._send_test(subtest)
      return

    # The generator is exhausted: all subtests of this test have been sent.
    del self._gens[test.procid]
    self._send_result(test, None)
def _next_seed(self):
seed = None
while not seed:
seed = self._rng.randint(-2147483648, 2147483647)
return seed
def create_scavenge_config(probability):
return FuzzerConfig(probability, ScavengeAnalyzer(), ScavengeFuzzer())
def create_marking_config(probability):
return FuzzerConfig(probability, MarkingAnalyzer(), MarkingFuzzer())
def create_gc_interval_config(probability):
return FuzzerConfig(probability, GcIntervalAnalyzer(), GcIntervalFuzzer())
def create_compaction_config(probability):
return FuzzerConfig(probability, None, CompactionFuzzer())
class ScavengeAnalyzer(Analyzer):
def get_analysis_flags(self):
return ['--fuzzer-gc-analysis']
def do_analysis(self, result):
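    # Scans the test output for a line like
    # '### Maximum new space size reached = 7.5'
    # printed when the test ran with the analysis flag above.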
for line in reversed(result.output.stdout.splitlines()):
if line.startswith('### Maximum new space size reached = '):
return int(float(line.split()[7]))
class ScavengeFuzzer(Fuzzer):
def create_flags_generator(self, test, analysis_value):
while True:
yield ['--stress-scavenge=%d' % analysis_value]
class MarkingAnalyzer(Analyzer):
def get_analysis_flags(self):
return ['--fuzzer-gc-analysis']
def do_analysis(self, result):
for line in reversed(result.output.stdout.splitlines()):
if line.startswith('### Maximum marking limit reached = '):
return int(float(line.split()[6]))
class MarkingFuzzer(Fuzzer):
def create_flags_generator(self, test, analysis_value):
while True:
yield ['--stress-marking=%d' % analysis_value]
class GcIntervalAnalyzer(Analyzer):
def get_analysis_flags(self):
return ['--fuzzer-gc-analysis']
def do_analysis(self, result):
for line in reversed(result.output.stdout.splitlines()):
if line.startswith('### Allocations = '):
return int(float(line.split()[3][:-1]))
class GcIntervalFuzzer(Fuzzer):
def create_flags_generator(self, test, analysis_value):
value = analysis_value / 10
while True:
yield ['--random-gc-interval=%d' % value]
class CompactionFuzzer(Fuzzer):
def create_flags_generator(self, test, analysis_value):
while True:
yield ['--stress-compaction-random']
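
A new fuzz dimension plugs in by subclassing Fuzzer (and optionally Analyzer) and exposing a factory function, mirroring the classes above. A minimal sketch; the class, flag name, and factory are hypothetical, not part of this commit:

class RandomIntervalFuzzer(Fuzzer):
  """Illustrative only: re-randomizes the flag value on every yield."""
  def __init__(self, rng):
    self._rng = rng

  def create_flags_generator(self, test, analysis_value):
    while True:
      # '--example-interval' is a placeholder, not a real V8 flag.
      yield ['--example-interval=%d' % self._rng.randint(1, 1000)]


def create_random_interval_config(probability, rng):
  # No analyzer: if only such configs are active, FuzzerProc._next_test
  # skips the analysis phase entirely.
  return FuzzerConfig(probability, None, RandomIntervalFuzzer(rng))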