Add flaky test classification feature to test suites.

Test expectations can now include the outcome FLAKY.
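As an illustration (hypothetical test name; the exact expression syntax follows
whatever the suite's existing .status file already uses), an entry marking a
test as flaky might look like:

  some-dir/some-test: PASS || FLAKY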

The test runner can now run a single class of tests (flaky|non-flaky|all). All tests that are not marked as FLAKY belong to the non-flaky class.
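For example (assuming the usual tools/run-tests.py invocation; the class is
selected with the new --flaky-tests option shown in the diff below):

  tools/run-tests.py --flaky-tests=skip ...      # non-flaky tests only
  tools/run-tests.py --flaky-tests=run ...       # flaky tests only
  tools/run-tests.py --flaky-tests=dontcare ...  # all tests (the default)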

The slash correction for Windows is now pulled into the test name method. Currently, the progress output on Windows contains a mixture of / and \.
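E.g. a test that previously appeared as mjsunit\regress\some-test in the Windows
progress output (hypothetical name) is now consistently printed as
mjsunit/regress/some-test.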

R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/22381003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16080 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
@@ -94,6 +94,9 @@ def BuildOptions():
                     default=False, action="store_true")
   result.add_option("--cat", help="Print the source of the tests",
                     default=False, action="store_true")
+  result.add_option("--flaky-tests",
+                    help="Regard tests marked as flaky (run|skip|dontcare)",
+                    default="dontcare")
   result.add_option("--command-prefix",
                     help="Prepended to each shell command used to run a test",
                     default="")
@@ -204,6 +207,9 @@ def ProcessOptions(options):
     # This is OK for distributed running, so we don't need to set no_network.
     options.command_prefix = (["python", "-u", run_valgrind] +
                               options.command_prefix)
+  if not options.flaky_tests in ["run", "skip", "dontcare"]:
+    print "Unknown flaky test mode %s" % options.flaky_tests
+    return False
   return True
@@ -315,7 +321,7 @@ def Execute(arch, mode, args, options, suites, workspace):
     if len(args) > 0:
       s.FilterTestCasesByArgs(args)
     all_tests += s.tests
-    s.FilterTestCasesByStatus(options.warn_unused)
+    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests)
     if options.cat:
       verbose.PrintTestSource(s.tests)
       continue
......
@@ -37,6 +37,7 @@ OKAY = 'OKAY'
 TIMEOUT = 'TIMEOUT'
 CRASH = 'CRASH'
 SLOW = 'SLOW'
+FLAKY = 'FLAKY'
 # These are just for the status files and are mapped below in DEFS:
 FAIL_OK = 'FAIL_OK'
 PASS_OR_FAIL = 'PASS_OR_FAIL'
@@ -48,6 +49,7 @@ KEYWORDS = {SKIP: SKIP,
             TIMEOUT: TIMEOUT,
             CRASH: CRASH,
             SLOW: SLOW,
+            FLAKY: FLAKY,
             FAIL_OK: FAIL_OK,
             PASS_OR_FAIL: PASS_OR_FAIL}
......
@@ -42,6 +42,7 @@ OKAY = "OKAY"
 TIMEOUT = "TIMEOUT"
 CRASH = "CRASH"
 SLOW = "SLOW"
+FLAKY = "FLAKY"
 # These are just for the status files and are mapped below in DEFS:
 FAIL_OK = "FAIL_OK"
 PASS_OR_FAIL = "PASS_OR_FAIL"
@@ -49,7 +50,7 @@ PASS_OR_FAIL = "PASS_OR_FAIL"
 ALWAYS = "ALWAYS"
 KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
+for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
             PASS_OR_FAIL, ALWAYS]:
   KEYWORDS[key] = key
@@ -68,6 +69,10 @@ def DoSkip(outcomes):
 def IsFlaky(outcomes):
+  return FLAKY in outcomes
+
+
+def IsPassOrFail(outcomes):
   return ((PASS in outcomes) and (FAIL in outcomes) and
           (not CRASH in outcomes) and (not OKAY in outcomes))
......
@@ -66,7 +66,10 @@ class TestSuite(object):
   # Used in the status file and for stdout printing.
   def CommonTestName(self, testcase):
-    return testcase.path
+    if utils.IsWindows():
+      return testcase.path.replace("\\", "/")
+    else:
+      return testcase.path

   def ListTests(self, context):
     raise NotImplementedError
@@ -84,32 +87,36 @@ class TestSuite(object):
   def ReadTestCases(self, context):
     self.tests = self.ListTests(context)

-  def FilterTestCasesByStatus(self, warn_unused_rules):
+  @staticmethod
+  def _FilterFlaky(flaky, mode):
+    return (mode == "run" and not flaky) or (mode == "skip" and flaky)
+
+  def FilterTestCasesByStatus(self, warn_unused_rules, flaky_tests="dontcare"):
     filtered = []
     used_rules = set()
     for t in self.tests:
+      flaky = False
       testname = self.CommonTestName(t)
-      if utils.IsWindows():
-        testname = testname.replace("\\", "/")
       if testname in self.rules:
         used_rules.add(testname)
-        outcomes = self.rules[testname]
-        t.outcomes = outcomes  # Even for skipped tests, as the TestCase
-                               # object stays around and PrintReport() uses it.
-        if statusfile.DoSkip(outcomes):
+        # Even for skipped tests, as the TestCase object stays around and
+        # PrintReport() uses it.
+        t.outcomes = self.rules[testname]
+        if statusfile.DoSkip(t.outcomes):
           continue  # Don't add skipped tests to |filtered|.
-      if len(self.wildcards) != 0:
-        skip = False
-        for rule in self.wildcards:
-          assert rule[-1] == '*'
-          if testname.startswith(rule[:-1]):
-            used_rules.add(rule)
-            outcomes = self.wildcards[rule]
-            t.outcomes = outcomes
-            if statusfile.DoSkip(outcomes):
-              skip = True
-              break  # "for rule in self.wildcards"
-        if skip: continue  # "for t in self.tests"
+        flaky = statusfile.IsFlaky(t.outcomes)
+      skip = False
+      for rule in self.wildcards:
+        assert rule[-1] == '*'
+        if testname.startswith(rule[:-1]):
+          used_rules.add(rule)
+          t.outcomes = self.wildcards[rule]
+          if statusfile.DoSkip(t.outcomes):
+            skip = True
+            break  # "for rule in self.wildcards"
+          flaky = flaky or statusfile.IsFlaky(t.outcomes)
+      if skip or self._FilterFlaky(flaky, flaky_tests):
+        continue  # "for t in self.tests"
       filtered.append(t)
     self.tests = filtered
......
@@ -54,7 +54,7 @@ def PrintReport(tests):
       skipped += 1
       continue
     if statusfile.TIMEOUT in o: timeout += 1
-    if statusfile.IsFlaky(o): nocrash += 1
+    if statusfile.IsPassOrFail(o): nocrash += 1
     if list(o) == [statusfile.PASS]: passes += 1
     if statusfile.IsFailOk(o): fail_ok += 1
     if list(o) == [statusfile.FAIL]: fail += 1
......