Commit 0f2223c8 authored by Michal Majewski, committed by Commit Bot

[test] Refactor getting test outcomes from statusfile.

Filtering by status file is split into four parts:
1. Getting outcomes - reads both variant-dependent and
variant-independent outcomes, so there is no longer a need to do it twice.
2. Checking unused rules - has a switch to check only variant
dependent/independent rules.
3. Reading flags - if outcome starts with '--' it is treated as a flag.
4. Actual filtering.

Outcomes removed from the testcase object, can be accessed
by call to its testsuite.

Bug: v8:6917
Cq-Include-Trybots: master.tryserver.v8:v8_linux_noi18n_rel_ng
Change-Id: I35762f891010ddda926250452b88656047433daa
Reviewed-on: https://chromium-review.googlesource.com/775160
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Sergiy Byelozyorov <sergiyb@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49547}
parent bd63de9c
......@@ -39,8 +39,8 @@ class BenchmarksVariantGenerator(testsuite.VariantGenerator):
# always opt to match the way the benchmarks are run for performance
# testing.
def FilterVariantsByTest(self, testcase):
if testcase.outcomes and statusfile.OnlyStandardVariant(
testcase.outcomes):
outcomes = self.suite.GetOutcomesForTestCase(testcase)
if outcomes and statusfile.OnlyStandardVariant(outcomes):
return self.standard_variant
return self.fast_variants
......
......@@ -102,7 +102,8 @@ FAST_VARIANTS = {
class Test262VariantGenerator(testsuite.VariantGenerator):
def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
outcomes = testcase.suite.GetOutcomesForTestCase(testcase)
if outcomes and statusfile.OnlyFastVariants(outcomes):
variant_flags = FAST_VARIANTS
else:
variant_flags = ALL_VARIANTS
......@@ -169,7 +170,6 @@ class Test262TestSuite(testsuite.TestSuite):
if "detachArrayBuffer.js" in
self.GetTestRecord(testcase).get("includes", [])
else []) +
([flag for flag in testcase.outcomes if flag.startswith("--")]) +
([flag for (feature, flag) in FEATURE_FLAGS.items()
if feature in self.GetTestRecord(testcase).get("features", [])])
)
......@@ -244,10 +244,11 @@ class Test262TestSuite(testsuite.TestSuite):
def HasUnexpectedOutput(self, testcase):
outcome = self.GetOutcome(testcase)
if (statusfile.FAIL_SLOPPY in testcase.outcomes and
outcomes = self.GetOutcomesForTestCase(testcase)
if (statusfile.FAIL_SLOPPY in outcomes and
"--use-strict" not in testcase.flags):
return outcome != statusfile.FAIL
return not outcome in ([outcome for outcome in testcase.outcomes
return not outcome in ([outcome for outcome in outcomes
if not outcome.startswith('--')
and outcome != statusfile.FAIL_SLOPPY]
or [statusfile.PASS])
......
......@@ -305,6 +305,9 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False)
for t in s.tests:
t.flags += s.GetStatusfileFlags(t)
test_backup[s] = s.tests
analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
"--print-deopt-stress"]
......
......@@ -292,6 +292,9 @@ class GCFuzzer(base_runner.BaseTestRunner):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False)
for t in s.tests:
t.flags += s.GetStatusfileFlags(t)
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
......
......@@ -114,11 +114,9 @@ def _GetInstructions(test, context):
timeout *= 4
if "--noenable-vfp3" in context.extra_flags:
timeout *= 2
# FIXME(machenbach): Make this more OO. Don't expose default outcomes or
# the like.
if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
timeout *= 2
# TODO(majeski): make it slow outcome dependent.
timeout *= 2
return Instructions(command, test.id, timeout, context.verbose, env)
......@@ -184,6 +182,7 @@ class TestJob(Job):
self.test.SetSuiteObject(process_context.suites)
instr = _GetInstructions(self.test, process_context.context)
except Exception, e:
# TODO(majeski): Better exception reporting.
return SetupProblem(e, self.test)
start_time = time.time()
......@@ -210,7 +209,7 @@ class Runner(object):
self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
slow_key = lambda t: statusfile.IsSlow(t.outcomes)
slow_key = lambda t: statusfile.IsSlow(t.suite.GetOutcomesForTestCase(t))
self.tests.sort(key=slow_key, reverse=True)
# Sort by stored duration of not opted out.
......
......@@ -336,7 +336,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
"flags": test.flags,
"command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"duration": test.duration,
"marked_slow": statusfile.IsSlow(test.outcomes),
"marked_slow": statusfile.IsSlow(
test.suite.GetOutcomesForTestCase(test)),
} for test in timed_tests[:20]
]
......@@ -369,7 +370,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
"expected": list(test.outcomes or ["PASS"]),
"expected": list(test.suite.GetOutcomesForTestCase(test) or ["PASS"]),
"duration": test.duration,
# TODO(machenbach): This stores only the global random seed from the
......@@ -416,7 +417,7 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
# First run of this test.
expected_outcomes = ([
expected
for expected in (test.outcomes or ["PASS"])
for expected in (test.suite.GetOutcomesForTestCase(test) or ["PASS"])
if expected in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
] or ["PASS"])
self.results[key] = {
......
......@@ -50,15 +50,17 @@ class VariantGenerator(object):
def FilterVariantsByTest(self, testcase):
result = self.all_variants
if testcase.outcomes:
if statusfile.OnlyStandardVariant(testcase.outcomes):
outcomes = testcase.suite.GetOutcomesForTestCase(testcase)
if outcomes:
if statusfile.OnlyStandardVariant(outcomes):
return self.standard_variant
if statusfile.OnlyFastVariants(testcase.outcomes):
if statusfile.OnlyFastVariants(outcomes):
result = self.fast_variants
return result
def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
outcomes = testcase.suite.GetOutcomesForTestCase(testcase)
if outcomes and statusfile.OnlyFastVariants(outcomes):
return FAST_VARIANT_FLAGS[variant]
else:
return ALL_VARIANT_FLAGS[variant]
......@@ -90,6 +92,8 @@ class TestSuite(object):
self.prefix_rules = None # {variant: {test name prefix: [rule]}}
self.total_duration = None # float, assigned on demand
self._outcomes_cache = dict()
def suffix(self):
return ".js"
......@@ -136,100 +140,96 @@ class TestSuite(object):
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
@staticmethod
def _FilterSlow(slow, mode):
return (mode == "run" and not slow) or (mode == "skip" and slow)
def GetStatusfileFlags(self, test):
"""Gets runtime flags from a status file.
@staticmethod
def _FilterPassFail(pass_fail, mode):
return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
def FilterTestCasesByStatus(self, warn_unused_rules,
slow_tests="dontcare",
pass_fail_tests="dontcare",
variants=False):
# Load statusfile before.
assert(self.rules is not None)
assert(self.prefix_rules is not None)
# Use only variants-dependent rules and prefix_rules when filtering
# respective test cases and generic rules when filtering generic test
# cases.
if not variants:
rules = self.rules[""]
prefix_rules = self.prefix_rules[""]
else:
# We set rules and prefix_rules to a variant-specific version for each
# test below.
rules = {}
prefix_rules = {}
Every outcome that starts with "--" is a flag. Status file has to be loaded
before using this function.
"""
flags = []
for outcome in self.GetOutcomesForTestCase(test):
if outcome.startswith('--'):
flags.append(outcome)
return flags
filtered = []
def FilterTestCasesByStatus(self,
slow_tests_mode=None,
pass_fail_tests_mode=None):
"""Filters tests by outcomes from status file.
# Remember used rules as tuples of (rule, variant), where variant is "" for
# variant-independent rules.
Status file has to be loaded before using this function.
Args:
slow_tests_mode: What to do with slow tests.
pass_fail_tests_mode: What to do with pass or fail tests.
Mode options:
None (default) - don't skip
"skip" - skip if slow/pass_fail
"run" - skip if not slow/pass_fail
"""
def _skip_slow(is_slow, mode):
return (
(mode == 'run' and not is_slow) or
(mode == 'skip' and is_slow))
def _skip_pass_fail(pass_fail, mode):
return (
(mode == 'run' and not pass_fail) or
(mode == 'skip' and pass_fail))
def _compliant(test):
outcomes = self.GetOutcomesForTestCase(test)
if statusfile.DoSkip(outcomes):
return False
if _skip_slow(statusfile.IsSlow(outcomes), slow_tests_mode):
return False
if _skip_pass_fail(statusfile.IsPassOrFail(outcomes),
pass_fail_tests_mode):
return False
return True
self.tests = filter(_compliant, self.tests)
def WarnUnusedRules(self, check_variant_rules=False):
"""Finds and prints unused rules in status file.
Rule X is unused when it doesn't apply to any tests, which can also mean
that all matching tests were skipped by another rule before evaluating X.
Status file has to be loaded before using this function.
"""
if check_variant_rules:
variants = list(ALL_VARIANTS)
else:
variants = ['']
used_rules = set()
for t in self.tests:
slow = False
pass_fail = False
testname = self.CommonTestName(t)
variant = t.variant or ""
if variants:
rules = self.rules[variant]
prefix_rules = self.prefix_rules[variant]
if testname in rules:
if testname in self.rules.get(variant, {}):
used_rules.add((testname, variant))
# Even for skipped tests, as the TestCase object stays around and
# PrintReport() uses it.
t.outcomes = t.outcomes | rules[testname]
if statusfile.DoSkip(t.outcomes):
continue # Don't add skipped tests to |filtered|.
for outcome in t.outcomes:
if outcome.startswith('--'):
t.flags += outcome.split()
slow = statusfile.IsSlow(t.outcomes)
pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
for prefix in prefix_rules:
if statusfile.DoSkip(self.rules[variant][testname]):
continue
for prefix in self.prefix_rules.get(variant, {}):
if testname.startswith(prefix):
used_rules.add((prefix, variant))
t.outcomes = t.outcomes | prefix_rules[prefix]
if statusfile.DoSkip(t.outcomes):
skip = True
break # "for rule in prefix_rules"
slow = slow or statusfile.IsSlow(t.outcomes)
pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
if (skip
or self._FilterSlow(slow, slow_tests)
or self._FilterPassFail(pass_fail, pass_fail_tests)):
continue # "for t in self.tests"
filtered.append(t)
self.tests = filtered
if not warn_unused_rules:
return
if not variants:
for rule in self.rules[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.rules[""][rule]))
for rule in self.prefix_rules[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.prefix_rules[""][rule]))
else:
for variant in ALL_VARIANTS:
for rule in self.rules[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.rules[variant][rule], variant))
for rule in self.prefix_rules[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.prefix_rules[variant][rule], variant))
if statusfile.DoSkip(self.prefix_rules[variant][prefix]):
break
for variant in variants:
for rule, value in (list(self.rules.get(variant, {}).iteritems()) +
list(self.prefix_rules.get(variant, {}).iteritems())):
if (rule, variant) not in used_rules:
if variant == '':
variant_desc = 'variant independent'
else:
variant_desc = 'variant: %s' % variant
print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
......@@ -256,7 +256,40 @@ class TestSuite(object):
break
self.tests = filtered
def GetOutcomesForTestCase(self, testcase):
"""Gets outcomes from status file.
Merges variant dependent and independent rules. Status file has to be loaded
before using this function.
"""
variant = testcase.variant or ''
testname = self.CommonTestName(testcase)
cache_key = '%s$%s' % (testname, variant)
if cache_key not in self._outcomes_cache:
# Load statusfile to get outcomes for the first time.
assert(self.rules is not None)
assert(self.prefix_rules is not None)
outcomes = frozenset()
for key in set([variant, '']):
rules = self.rules.get(key, {})
prefix_rules = self.prefix_rules.get(key, {})
if testname in rules:
outcomes |= rules[testname]
for prefix in prefix_rules:
if testname.startswith(prefix):
outcomes |= prefix_rules[prefix]
self._outcomes_cache[cache_key] = outcomes
return self._outcomes_cache[cache_key]
def GetShellForTestCase(self, testcase):
"""Returns shell to be executed for this test case."""
return 'd8'
def GetParametersForTestCase(self, testcase, context):
......@@ -291,7 +324,8 @@ class TestSuite(object):
def HasUnexpectedOutput(self, testcase):
outcome = self.GetOutcome(testcase)
return not outcome in (testcase.outcomes or [statusfile.PASS])
return not outcome in (self.GetOutcomesForTestCase(testcase)
or [statusfile.PASS])
def StripOutputForTransmit(self, testcase):
if not self.HasUnexpectedOutput(testcase):
......
......@@ -34,12 +34,13 @@ class TestSuiteTest(unittest.TestCase):
'baz/': set(['PASS', 'SLOW']),
},
}
suite.FilterTestCasesByStatus(warn_unused_rules=False)
suite.FilterTestCasesByStatus()
self.assertEquals(
[TestCase(suite, 'baz/bar')],
suite.tests,
)
self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), suite.tests[0].outcomes)
outcomes = suite.GetOutcomesForTestCase(suite.tests[0])
self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
def test_filter_testcases_by_status_second_pass(self):
suite = TestSuite('foo', 'bar')
......@@ -47,10 +48,6 @@ class TestSuiteTest(unittest.TestCase):
test1 = TestCase(suite, 'foo/bar')
test2 = TestCase(suite, 'baz/bar')
# Contrived outcomes from filtering by variant-independent rules.
test1.outcomes = set(['PREV'])
test2.outcomes = set(['PREV'])
suite.tests = [
test1.CopyAddingFlags(variant='default', flags=[]),
test1.CopyAddingFlags(variant='stress', flags=['-v']),
......@@ -59,6 +56,9 @@ class TestSuiteTest(unittest.TestCase):
]
suite.rules = {
'': {
'foo/bar': set(['PREV']),
},
'default': {
'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']),
......@@ -68,6 +68,9 @@ class TestSuiteTest(unittest.TestCase):
},
}
suite.prefix_rules = {
'': {
'baz/': set(['PREV']),
},
'default': {
'baz/': set(['PASS', 'SLOW']),
},
......@@ -75,7 +78,7 @@ class TestSuiteTest(unittest.TestCase):
'foo/': set(['PASS', 'SLOW']),
},
}
suite.FilterTestCasesByStatus(warn_unused_rules=False, variants=True)
suite.FilterTestCasesByStatus()
self.assertEquals(
[
TestCase(suite, 'foo/bar', flags=['-v']),
......@@ -85,12 +88,12 @@ class TestSuiteTest(unittest.TestCase):
)
self.assertEquals(
set(['PASS', 'SLOW', 'PREV']),
suite.tests[0].outcomes,
set(['PREV', 'PASS', 'SLOW']),
suite.GetOutcomesForTestCase(suite.tests[0]),
)
self.assertEquals(
set(['PASS', 'FAIL', 'SLOW', 'PREV']),
suite.tests[1].outcomes,
set(['PREV', 'PASS', 'FAIL', 'SLOW']),
suite.GetOutcomesForTestCase(suite.tests[1]),
)
......
......@@ -46,18 +46,24 @@ def PrintReport(tests):
total = len(tests)
skipped = timeout = nocrash = passes = fail_ok = fail = 0
for t in tests:
if "outcomes" not in dir(t) or not t.outcomes:
outcomes = t.suite.GetOutcomesForTestCase(t)
if not outcomes:
passes += 1
continue
o = t.outcomes
if statusfile.DoSkip(o):
if statusfile.DoSkip(outcomes):
skipped += 1
continue
if statusfile.TIMEOUT in o: timeout += 1
if statusfile.IsPassOrFail(o): nocrash += 1
if list(o) == [statusfile.PASS]: passes += 1
if statusfile.IsFailOk(o): fail_ok += 1
if list(o) == [statusfile.FAIL]: fail += 1
if statusfile.TIMEOUT in outcomes:
timeout += 1
if statusfile.IsPassOrFail(outcomes):
nocrash += 1
if list(outcomes) == [statusfile.PASS]:
passes += 1
if statusfile.IsFailOk(outcomes):
fail_ok += 1
if list(outcomes) == [statusfile.FAIL]:
fail += 1
print REPORT_TEMPLATE % {
"total": total,
"skipped": skipped,
......
......@@ -32,16 +32,13 @@ class TestCase(object):
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
self.outcomes = frozenset([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, variant, flags):
copy = TestCase(self.suite, self.path, variant, self.flags + flags)
copy.outcomes = self.outcomes
return copy
return TestCase(self.suite, self.path, variant, self.flags + flags)
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
......
......@@ -410,10 +410,11 @@ class StandardTestRunner(base_runner.BaseTestRunner):
s.FilterTestCasesByArgs(args)
all_tests += s.tests
# First filtering by status applying the generic rules (independent of
# variants).
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests)
# First filtering by status applying the generic rules (tests without
# variants)
if options.warn_unused:
s.WarnUnusedRules(check_variant_rules=False)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
......@@ -442,9 +443,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
else:
s.tests = variant_tests
# Second filtering by status applying the variant-dependent rules.
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests, variants=True)
# Second filtering by status applying also the variant-dependent rules.
if options.warn_unused:
s.WarnUnusedRules(check_variant_rules=True)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
for t in s.tests:
t.flags += s.GetStatusfileFlags(t)
s.tests = self._shard_tests(s.tests, options)
num_tests += len(s.tests)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment