Commit 0f2223c8 authored by Michal Majewski, committed by Commit Bot

[test] Refactor getting test outcomes from statusfile.

Filtering by status file is split into four parts:
1. Getting outcomes - reads both variant dependent and
independent outcomes, no more need to do it twice.
2. Checking unused rules - has a switch to check only variant
dependent/independent rules.
3. Reading flags - if outcome starts with '--' it is treated as a flag.
4. Actual filtering.

Outcomes removed from the testcase object, can be accessed
by call to its testsuite.

Bug: v8:6917
Cq-Include-Trybots: master.tryserver.v8:v8_linux_noi18n_rel_ng
Change-Id: I35762f891010ddda926250452b88656047433daa
Reviewed-on: https://chromium-review.googlesource.com/775160
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Sergiy Byelozyorov <sergiyb@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49547}
parent bd63de9c
...@@ -39,8 +39,8 @@ class BenchmarksVariantGenerator(testsuite.VariantGenerator): ...@@ -39,8 +39,8 @@ class BenchmarksVariantGenerator(testsuite.VariantGenerator):
# always opt to match the way the benchmarks are run for performance # always opt to match the way the benchmarks are run for performance
# testing. # testing.
def FilterVariantsByTest(self, testcase): def FilterVariantsByTest(self, testcase):
if testcase.outcomes and statusfile.OnlyStandardVariant( outcomes = self.suite.GetOutcomesForTestCase(testcase)
testcase.outcomes): if outcomes and statusfile.OnlyStandardVariant(outcomes):
return self.standard_variant return self.standard_variant
return self.fast_variants return self.fast_variants
......
...@@ -102,7 +102,8 @@ FAST_VARIANTS = { ...@@ -102,7 +102,8 @@ FAST_VARIANTS = {
class Test262VariantGenerator(testsuite.VariantGenerator): class Test262VariantGenerator(testsuite.VariantGenerator):
def GetFlagSets(self, testcase, variant): def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes): outcomes = testcase.suite.GetOutcomesForTestCase(testcase)
if outcomes and statusfile.OnlyFastVariants(outcomes):
variant_flags = FAST_VARIANTS variant_flags = FAST_VARIANTS
else: else:
variant_flags = ALL_VARIANTS variant_flags = ALL_VARIANTS
...@@ -169,7 +170,6 @@ class Test262TestSuite(testsuite.TestSuite): ...@@ -169,7 +170,6 @@ class Test262TestSuite(testsuite.TestSuite):
if "detachArrayBuffer.js" in if "detachArrayBuffer.js" in
self.GetTestRecord(testcase).get("includes", []) self.GetTestRecord(testcase).get("includes", [])
else []) + else []) +
([flag for flag in testcase.outcomes if flag.startswith("--")]) +
([flag for (feature, flag) in FEATURE_FLAGS.items() ([flag for (feature, flag) in FEATURE_FLAGS.items()
if feature in self.GetTestRecord(testcase).get("features", [])]) if feature in self.GetTestRecord(testcase).get("features", [])])
) )
...@@ -244,10 +244,11 @@ class Test262TestSuite(testsuite.TestSuite): ...@@ -244,10 +244,11 @@ class Test262TestSuite(testsuite.TestSuite):
def HasUnexpectedOutput(self, testcase): def HasUnexpectedOutput(self, testcase):
outcome = self.GetOutcome(testcase) outcome = self.GetOutcome(testcase)
if (statusfile.FAIL_SLOPPY in testcase.outcomes and outcomes = self.GetOutcomesForTestCase(testcase)
if (statusfile.FAIL_SLOPPY in outcomes and
"--use-strict" not in testcase.flags): "--use-strict" not in testcase.flags):
return outcome != statusfile.FAIL return outcome != statusfile.FAIL
return not outcome in ([outcome for outcome in testcase.outcomes return not outcome in ([outcome for outcome in outcomes
if not outcome.startswith('--') if not outcome.startswith('--')
and outcome != statusfile.FAIL_SLOPPY] and outcome != statusfile.FAIL_SLOPPY]
or [statusfile.PASS]) or [statusfile.PASS])
......
...@@ -305,6 +305,9 @@ class DeoptFuzzer(base_runner.BaseTestRunner): ...@@ -305,6 +305,9 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
if len(args) > 0: if len(args) > 0:
s.FilterTestCasesByArgs(args) s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False) s.FilterTestCasesByStatus(False)
for t in s.tests:
t.flags += s.GetStatusfileFlags(t)
test_backup[s] = s.tests test_backup[s] = s.tests
analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT, analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
"--print-deopt-stress"] "--print-deopt-stress"]
......
...@@ -292,6 +292,9 @@ class GCFuzzer(base_runner.BaseTestRunner): ...@@ -292,6 +292,9 @@ class GCFuzzer(base_runner.BaseTestRunner):
if len(args) > 0: if len(args) > 0:
s.FilterTestCasesByArgs(args) s.FilterTestCasesByArgs(args)
s.FilterTestCasesByStatus(False) s.FilterTestCasesByStatus(False)
for t in s.tests:
t.flags += s.GetStatusfileFlags(t)
num_tests += len(s.tests) num_tests += len(s.tests)
for t in s.tests: for t in s.tests:
t.id = test_id t.id = test_id
......
...@@ -114,11 +114,9 @@ def _GetInstructions(test, context): ...@@ -114,11 +114,9 @@ def _GetInstructions(test, context):
timeout *= 4 timeout *= 4
if "--noenable-vfp3" in context.extra_flags: if "--noenable-vfp3" in context.extra_flags:
timeout *= 2 timeout *= 2
# FIXME(machenbach): Make this more OO. Don't expose default outcomes or
# the like.
if statusfile.IsSlow(test.outcomes or [statusfile.PASS]): # TODO(majeski): make it slow outcome dependent.
timeout *= 2 timeout *= 2
return Instructions(command, test.id, timeout, context.verbose, env) return Instructions(command, test.id, timeout, context.verbose, env)
...@@ -184,6 +182,7 @@ class TestJob(Job): ...@@ -184,6 +182,7 @@ class TestJob(Job):
self.test.SetSuiteObject(process_context.suites) self.test.SetSuiteObject(process_context.suites)
instr = _GetInstructions(self.test, process_context.context) instr = _GetInstructions(self.test, process_context.context)
except Exception, e: except Exception, e:
# TODO(majeski): Better exception reporting.
return SetupProblem(e, self.test) return SetupProblem(e, self.test)
start_time = time.time() start_time = time.time()
...@@ -210,7 +209,7 @@ class Runner(object): ...@@ -210,7 +209,7 @@ class Runner(object):
self.suite_names = [s.name for s in suites] self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first. # Always pre-sort by status file, slowest tests first.
slow_key = lambda t: statusfile.IsSlow(t.outcomes) slow_key = lambda t: statusfile.IsSlow(t.suite.GetOutcomesForTestCase(t))
self.tests.sort(key=slow_key, reverse=True) self.tests.sort(key=slow_key, reverse=True)
# Sort by stored duration of not opted out. # Sort by stored duration of not opted out.
......
...@@ -336,7 +336,8 @@ class JsonTestProgressIndicator(ProgressIndicator): ...@@ -336,7 +336,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
"flags": test.flags, "flags": test.flags,
"command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""), "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"duration": test.duration, "duration": test.duration,
"marked_slow": statusfile.IsSlow(test.outcomes), "marked_slow": statusfile.IsSlow(
test.suite.GetOutcomesForTestCase(test)),
} for test in timed_tests[:20] } for test in timed_tests[:20]
] ]
...@@ -369,7 +370,7 @@ class JsonTestProgressIndicator(ProgressIndicator): ...@@ -369,7 +370,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"stderr": test.output.stderr, "stderr": test.output.stderr,
"exit_code": test.output.exit_code, "exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test), "result": test.suite.GetOutcome(test),
"expected": list(test.outcomes or ["PASS"]), "expected": list(test.suite.GetOutcomesForTestCase(test) or ["PASS"]),
"duration": test.duration, "duration": test.duration,
# TODO(machenbach): This stores only the global random seed from the # TODO(machenbach): This stores only the global random seed from the
...@@ -416,7 +417,7 @@ class FlakinessTestProgressIndicator(ProgressIndicator): ...@@ -416,7 +417,7 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
# First run of this test. # First run of this test.
expected_outcomes = ([ expected_outcomes = ([
expected expected
for expected in (test.outcomes or ["PASS"]) for expected in (test.suite.GetOutcomesForTestCase(test) or ["PASS"])
if expected in ["PASS", "FAIL", "CRASH", "TIMEOUT"] if expected in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
] or ["PASS"]) ] or ["PASS"])
self.results[key] = { self.results[key] = {
......
...@@ -50,15 +50,17 @@ class VariantGenerator(object): ...@@ -50,15 +50,17 @@ class VariantGenerator(object):
def FilterVariantsByTest(self, testcase): def FilterVariantsByTest(self, testcase):
result = self.all_variants result = self.all_variants
if testcase.outcomes: outcomes = testcase.suite.GetOutcomesForTestCase(testcase)
if statusfile.OnlyStandardVariant(testcase.outcomes): if outcomes:
if statusfile.OnlyStandardVariant(outcomes):
return self.standard_variant return self.standard_variant
if statusfile.OnlyFastVariants(testcase.outcomes): if statusfile.OnlyFastVariants(outcomes):
result = self.fast_variants result = self.fast_variants
return result return result
def GetFlagSets(self, testcase, variant): def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes): outcomes = testcase.suite.GetOutcomesForTestCase(testcase)
if outcomes and statusfile.OnlyFastVariants(outcomes):
return FAST_VARIANT_FLAGS[variant] return FAST_VARIANT_FLAGS[variant]
else: else:
return ALL_VARIANT_FLAGS[variant] return ALL_VARIANT_FLAGS[variant]
...@@ -90,6 +92,8 @@ class TestSuite(object): ...@@ -90,6 +92,8 @@ class TestSuite(object):
self.prefix_rules = None # {variant: {test name prefix: [rule]}} self.prefix_rules = None # {variant: {test name prefix: [rule]}}
self.total_duration = None # float, assigned on demand self.total_duration = None # float, assigned on demand
self._outcomes_cache = dict()
def suffix(self): def suffix(self):
return ".js" return ".js"
...@@ -136,100 +140,96 @@ class TestSuite(object): ...@@ -136,100 +140,96 @@ class TestSuite(object):
def ReadTestCases(self, context): def ReadTestCases(self, context):
self.tests = self.ListTests(context) self.tests = self.ListTests(context)
@staticmethod def GetStatusfileFlags(self, test):
def _FilterSlow(slow, mode): """Gets runtime flags from a status file.
return (mode == "run" and not slow) or (mode == "skip" and slow)
@staticmethod Every outcome that starts with "--" is a flag. Status file has to be loaded
def _FilterPassFail(pass_fail, mode): before using this function.
return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail) """
flags = []
def FilterTestCasesByStatus(self, warn_unused_rules, for outcome in self.GetOutcomesForTestCase(test):
slow_tests="dontcare", if outcome.startswith('--'):
pass_fail_tests="dontcare", flags.append(outcome)
variants=False): return flags
# Load statusfile before.
assert(self.rules is not None)
assert(self.prefix_rules is not None)
# Use only variants-dependent rules and prefix_rules when filtering
# respective test cases and generic rules when filtering generic test
# cases.
if not variants:
rules = self.rules[""]
prefix_rules = self.prefix_rules[""]
else:
# We set rules and prefix_rules to a variant-specific version for each
# test below.
rules = {}
prefix_rules = {}
filtered = [] def FilterTestCasesByStatus(self,
slow_tests_mode=None,
pass_fail_tests_mode=None):
"""Filters tests by outcomes from status file.
# Remember used rules as tuples of (rule, variant), where variant is "" for Status file has to be loaded before using this function.
# variant-independent rules.
Args:
slow_tests_mode: What to do with slow tests.
pass_fail_tests_mode: What to do with pass or fail tests.
Mode options:
None (default) - don't skip
"skip" - skip if slow/pass_fail
"run" - skip if not slow/pass_fail
"""
def _skip_slow(is_slow, mode):
return (
(mode == 'run' and not is_slow) or
(mode == 'skip' and is_slow))
def _skip_pass_fail(pass_fail, mode):
return (
(mode == 'run' and not pass_fail) or
(mode == 'skip' and pass_fail))
def _compliant(test):
outcomes = self.GetOutcomesForTestCase(test)
if statusfile.DoSkip(outcomes):
return False
if _skip_slow(statusfile.IsSlow(outcomes), slow_tests_mode):
return False
if _skip_pass_fail(statusfile.IsPassOrFail(outcomes),
pass_fail_tests_mode):
return False
return True
self.tests = filter(_compliant, self.tests)
def WarnUnusedRules(self, check_variant_rules=False):
"""Finds and prints unused rules in status file.
Rule X is unused when it doesn't apply to any tests, which can also mean
that all matching tests were skipped by another rule before evaluating X.
Status file has to be loaded before using this function.
"""
if check_variant_rules:
variants = list(ALL_VARIANTS)
else:
variants = ['']
used_rules = set() used_rules = set()
for t in self.tests: for t in self.tests:
slow = False
pass_fail = False
testname = self.CommonTestName(t) testname = self.CommonTestName(t)
variant = t.variant or "" variant = t.variant or ""
if variants:
rules = self.rules[variant] if testname in self.rules.get(variant, {}):
prefix_rules = self.prefix_rules[variant]
if testname in rules:
used_rules.add((testname, variant)) used_rules.add((testname, variant))
# Even for skipped tests, as the TestCase object stays around and if statusfile.DoSkip(self.rules[variant][testname]):
# PrintReport() uses it. continue
t.outcomes = t.outcomes | rules[testname]
if statusfile.DoSkip(t.outcomes): for prefix in self.prefix_rules.get(variant, {}):
continue # Don't add skipped tests to |filtered|.
for outcome in t.outcomes:
if outcome.startswith('--'):
t.flags += outcome.split()
slow = statusfile.IsSlow(t.outcomes)
pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
for prefix in prefix_rules:
if testname.startswith(prefix): if testname.startswith(prefix):
used_rules.add((prefix, variant)) used_rules.add((prefix, variant))
t.outcomes = t.outcomes | prefix_rules[prefix] if statusfile.DoSkip(self.prefix_rules[variant][prefix]):
if statusfile.DoSkip(t.outcomes): break
skip = True
break # "for rule in prefix_rules" for variant in variants:
slow = slow or statusfile.IsSlow(t.outcomes) for rule, value in (list(self.rules.get(variant, {}).iteritems()) +
pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes) list(self.prefix_rules.get(variant, {}).iteritems())):
if (skip if (rule, variant) not in used_rules:
or self._FilterSlow(slow, slow_tests) if variant == '':
or self._FilterPassFail(pass_fail, pass_fail_tests)): variant_desc = 'variant independent'
continue # "for t in self.tests" else:
filtered.append(t) variant_desc = 'variant: %s' % variant
self.tests = filtered print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
if not warn_unused_rules:
return
if not variants:
for rule in self.rules[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.rules[""][rule]))
for rule in self.prefix_rules[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.prefix_rules[""][rule]))
else:
for variant in ALL_VARIANTS:
for rule in self.rules[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.rules[variant][rule], variant))
for rule in self.prefix_rules[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.prefix_rules[variant][rule], variant))
def FilterTestCasesByArgs(self, args): def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments. """Filter test cases based on command-line arguments.
...@@ -256,7 +256,40 @@ class TestSuite(object): ...@@ -256,7 +256,40 @@ class TestSuite(object):
break break
self.tests = filtered self.tests = filtered
def GetOutcomesForTestCase(self, testcase):
"""Gets outcomes from status file.
Merges variant dependent and independent rules. Status file has to be loaded
before using this function.
"""
variant = testcase.variant or ''
testname = self.CommonTestName(testcase)
cache_key = '%s$%s' % (testname, variant)
if cache_key not in self._outcomes_cache:
# Load statusfile to get outcomes for the first time.
assert(self.rules is not None)
assert(self.prefix_rules is not None)
outcomes = frozenset()
for key in set([variant, '']):
rules = self.rules.get(key, {})
prefix_rules = self.prefix_rules.get(key, {})
if testname in rules:
outcomes |= rules[testname]
for prefix in prefix_rules:
if testname.startswith(prefix):
outcomes |= prefix_rules[prefix]
self._outcomes_cache[cache_key] = outcomes
return self._outcomes_cache[cache_key]
def GetShellForTestCase(self, testcase): def GetShellForTestCase(self, testcase):
"""Returns shell to be executed for this test case."""
return 'd8' return 'd8'
def GetParametersForTestCase(self, testcase, context): def GetParametersForTestCase(self, testcase, context):
...@@ -291,7 +324,8 @@ class TestSuite(object): ...@@ -291,7 +324,8 @@ class TestSuite(object):
def HasUnexpectedOutput(self, testcase): def HasUnexpectedOutput(self, testcase):
outcome = self.GetOutcome(testcase) outcome = self.GetOutcome(testcase)
return not outcome in (testcase.outcomes or [statusfile.PASS]) return not outcome in (self.GetOutcomesForTestCase(testcase)
or [statusfile.PASS])
def StripOutputForTransmit(self, testcase): def StripOutputForTransmit(self, testcase):
if not self.HasUnexpectedOutput(testcase): if not self.HasUnexpectedOutput(testcase):
......
...@@ -34,12 +34,13 @@ class TestSuiteTest(unittest.TestCase): ...@@ -34,12 +34,13 @@ class TestSuiteTest(unittest.TestCase):
'baz/': set(['PASS', 'SLOW']), 'baz/': set(['PASS', 'SLOW']),
}, },
} }
suite.FilterTestCasesByStatus(warn_unused_rules=False) suite.FilterTestCasesByStatus()
self.assertEquals( self.assertEquals(
[TestCase(suite, 'baz/bar')], [TestCase(suite, 'baz/bar')],
suite.tests, suite.tests,
) )
self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), suite.tests[0].outcomes) outcomes = suite.GetOutcomesForTestCase(suite.tests[0])
self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
def test_filter_testcases_by_status_second_pass(self): def test_filter_testcases_by_status_second_pass(self):
suite = TestSuite('foo', 'bar') suite = TestSuite('foo', 'bar')
...@@ -47,10 +48,6 @@ class TestSuiteTest(unittest.TestCase): ...@@ -47,10 +48,6 @@ class TestSuiteTest(unittest.TestCase):
test1 = TestCase(suite, 'foo/bar') test1 = TestCase(suite, 'foo/bar')
test2 = TestCase(suite, 'baz/bar') test2 = TestCase(suite, 'baz/bar')
# Contrived outcomes from filtering by variant-independent rules.
test1.outcomes = set(['PREV'])
test2.outcomes = set(['PREV'])
suite.tests = [ suite.tests = [
test1.CopyAddingFlags(variant='default', flags=[]), test1.CopyAddingFlags(variant='default', flags=[]),
test1.CopyAddingFlags(variant='stress', flags=['-v']), test1.CopyAddingFlags(variant='stress', flags=['-v']),
...@@ -59,6 +56,9 @@ class TestSuiteTest(unittest.TestCase): ...@@ -59,6 +56,9 @@ class TestSuiteTest(unittest.TestCase):
] ]
suite.rules = { suite.rules = {
'': {
'foo/bar': set(['PREV']),
},
'default': { 'default': {
'foo/bar': set(['PASS', 'SKIP']), 'foo/bar': set(['PASS', 'SKIP']),
'baz/bar': set(['PASS', 'FAIL']), 'baz/bar': set(['PASS', 'FAIL']),
...@@ -68,6 +68,9 @@ class TestSuiteTest(unittest.TestCase): ...@@ -68,6 +68,9 @@ class TestSuiteTest(unittest.TestCase):
}, },
} }
suite.prefix_rules = { suite.prefix_rules = {
'': {
'baz/': set(['PREV']),
},
'default': { 'default': {
'baz/': set(['PASS', 'SLOW']), 'baz/': set(['PASS', 'SLOW']),
}, },
...@@ -75,7 +78,7 @@ class TestSuiteTest(unittest.TestCase): ...@@ -75,7 +78,7 @@ class TestSuiteTest(unittest.TestCase):
'foo/': set(['PASS', 'SLOW']), 'foo/': set(['PASS', 'SLOW']),
}, },
} }
suite.FilterTestCasesByStatus(warn_unused_rules=False, variants=True) suite.FilterTestCasesByStatus()
self.assertEquals( self.assertEquals(
[ [
TestCase(suite, 'foo/bar', flags=['-v']), TestCase(suite, 'foo/bar', flags=['-v']),
...@@ -85,12 +88,12 @@ class TestSuiteTest(unittest.TestCase): ...@@ -85,12 +88,12 @@ class TestSuiteTest(unittest.TestCase):
) )
self.assertEquals( self.assertEquals(
set(['PASS', 'SLOW', 'PREV']), set(['PREV', 'PASS', 'SLOW']),
suite.tests[0].outcomes, suite.GetOutcomesForTestCase(suite.tests[0]),
) )
self.assertEquals( self.assertEquals(
set(['PASS', 'FAIL', 'SLOW', 'PREV']), set(['PREV', 'PASS', 'FAIL', 'SLOW']),
suite.tests[1].outcomes, suite.GetOutcomesForTestCase(suite.tests[1]),
) )
......
...@@ -46,18 +46,24 @@ def PrintReport(tests): ...@@ -46,18 +46,24 @@ def PrintReport(tests):
total = len(tests) total = len(tests)
skipped = timeout = nocrash = passes = fail_ok = fail = 0 skipped = timeout = nocrash = passes = fail_ok = fail = 0
for t in tests: for t in tests:
if "outcomes" not in dir(t) or not t.outcomes: outcomes = t.suite.GetOutcomesForTestCase(t)
if not outcomes:
passes += 1 passes += 1
continue continue
o = t.outcomes if statusfile.DoSkip(outcomes):
if statusfile.DoSkip(o):
skipped += 1 skipped += 1
continue continue
if statusfile.TIMEOUT in o: timeout += 1 if statusfile.TIMEOUT in outcomes:
if statusfile.IsPassOrFail(o): nocrash += 1 timeout += 1
if list(o) == [statusfile.PASS]: passes += 1 if statusfile.IsPassOrFail(outcomes):
if statusfile.IsFailOk(o): fail_ok += 1 nocrash += 1
if list(o) == [statusfile.FAIL]: fail += 1 if list(outcomes) == [statusfile.PASS]:
passes += 1
if statusfile.IsFailOk(outcomes):
fail_ok += 1
if list(outcomes) == [statusfile.FAIL]:
fail += 1
print REPORT_TEMPLATE % { print REPORT_TEMPLATE % {
"total": total, "total": total,
"skipped": skipped, "skipped": skipped,
......
...@@ -32,16 +32,13 @@ class TestCase(object): ...@@ -32,16 +32,13 @@ class TestCase(object):
self.path = path # string, e.g. 'div-mod', 'test-api/foo' self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant self.variant = variant # name of the used testing variant
self.outcomes = frozenset([])
self.output = None self.output = None
self.id = None # int, used to map result back to TestCase instance self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed. self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, variant, flags): def CopyAddingFlags(self, variant, flags):
copy = TestCase(self.suite, self.path, variant, self.flags + flags) return TestCase(self.suite, self.path, variant, self.flags + flags)
copy.outcomes = self.outcomes
return copy
def SetSuiteObject(self, suites): def SetSuiteObject(self, suites):
self.suite = suites[self.suite] self.suite = suites[self.suite]
......
...@@ -410,10 +410,11 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -410,10 +410,11 @@ class StandardTestRunner(base_runner.BaseTestRunner):
s.FilterTestCasesByArgs(args) s.FilterTestCasesByArgs(args)
all_tests += s.tests all_tests += s.tests
# First filtering by status applying the generic rules (independent of # First filtering by status applying the generic rules (tests without
# variants). # variants)
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests, if options.warn_unused:
options.pass_fail_tests) s.WarnUnusedRules(check_variant_rules=False)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
if options.cat: if options.cat:
verbose.PrintTestSource(s.tests) verbose.PrintTestSource(s.tests)
...@@ -442,9 +443,13 @@ class StandardTestRunner(base_runner.BaseTestRunner): ...@@ -442,9 +443,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
else: else:
s.tests = variant_tests s.tests = variant_tests
# Second filtering by status applying the variant-dependent rules. # Second filtering by status applying also the variant-dependent rules.
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests, if options.warn_unused:
options.pass_fail_tests, variants=True) s.WarnUnusedRules(check_variant_rules=True)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
for t in s.tests:
t.flags += s.GetStatusfileFlags(t)
s.tests = self._shard_tests(s.tests, options) s.tests = self._shard_tests(s.tests, options)
num_tests += len(s.tests) num_tests += len(s.tests)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment