Add flaky test classification feature to test suites.

Test expectations can now include the outcome FLAKY.
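
For illustration, a status-file entry can now carry the new keyword (the test name here is hypothetical; the line follows the usual name-to-outcomes form of the status files):

    # In a suite's .status file:
    some-intermittent-test: FLAKY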

The test runner can now run one class of tests (flaky|non-flaky|all). Every test that is not marked as FLAKY belongs to the non-flaky class.
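
In terms of the new --flaky-tests option added below, the three classes map onto (invocation sketch, assuming the usual tools/run-tests.py driver):

    tools/run-tests.py --flaky-tests=dontcare   # all (the default)
    tools/run-tests.py --flaky-tests=run        # flaky class only
    tools/run-tests.py --flaky-tests=skip       # non-flaky class only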

The slash correction for Windows is now pulled into the test name method (CommonTestName). Until now, the progress output on Windows contained a mixture of / and \.
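
The correction itself is a plain backslash-to-slash replacement on the test path, e.g. (hypothetical path):

    >>> "mjsunit\\regress\\regress-123".replace("\\", "/")
    'mjsunit/regress/regress-123'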

R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/22381003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16080 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

@@ -94,6 +94,9 @@ def BuildOptions():
                     default=False, action="store_true")
   result.add_option("--cat", help="Print the source of the tests",
                     default=False, action="store_true")
+  result.add_option("--flaky-tests",
+                    help="Regard tests marked as flaky (run|skip|dontcare)",
+                    default="dontcare")
   result.add_option("--command-prefix",
                     help="Prepended to each shell command used to run a test",
                     default="")
@@ -204,6 +207,9 @@ def ProcessOptions(options):
     # This is OK for distributed running, so we don't need to set no_network.
     options.command_prefix = (["python", "-u", run_valgrind] +
                               options.command_prefix)
+  if not options.flaky_tests in ["run", "skip", "dontcare"]:
+    print "Unknown flaky test mode %s" % options.flaky_tests
+    return False
   return True
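
Any other mode is rejected by the check above, e.g. (the message comes from the print statement; the False return presumably makes the driver exit early, as with other option errors):

    $ tools/run-tests.py --flaky-tests=sometimes
    Unknown flaky test mode sometimes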

@@ -315,7 +321,7 @@ def Execute(arch, mode, args, options, suites, workspace):
     if len(args) > 0:
       s.FilterTestCasesByArgs(args)
     all_tests += s.tests
-    s.FilterTestCasesByStatus(options.warn_unused)
+    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests)
     if options.cat:
       verbose.PrintTestSource(s.tests)
       continue

@@ -37,6 +37,7 @@ OKAY = 'OKAY'
 TIMEOUT = 'TIMEOUT'
 CRASH = 'CRASH'
 SLOW = 'SLOW'
+FLAKY = 'FLAKY'
 # These are just for the status files and are mapped below in DEFS:
 FAIL_OK = 'FAIL_OK'
 PASS_OR_FAIL = 'PASS_OR_FAIL'
@@ -48,6 +49,7 @@ KEYWORDS = {SKIP: SKIP,
             TIMEOUT: TIMEOUT,
             CRASH: CRASH,
             SLOW: SLOW,
+            FLAKY: FLAKY,
             FAIL_OK: FAIL_OK,
             PASS_OR_FAIL: PASS_OR_FAIL}

@@ -42,6 +42,7 @@ OKAY = "OKAY"
 TIMEOUT = "TIMEOUT"
 CRASH = "CRASH"
 SLOW = "SLOW"
+FLAKY = "FLAKY"
 # These are just for the status files and are mapped below in DEFS:
 FAIL_OK = "FAIL_OK"
 PASS_OR_FAIL = "PASS_OR_FAIL"
@@ -49,7 +50,7 @@ PASS_OR_FAIL = "PASS_OR_FAIL"
 ALWAYS = "ALWAYS"
 
 KEYWORDS = {}
-for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
+for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
             PASS_OR_FAIL, ALWAYS]:
   KEYWORDS[key] = key
@@ -68,6 +69,10 @@ def DoSkip(outcomes):
 
 
 def IsFlaky(outcomes):
+  return FLAKY in outcomes
+
+
+def IsPassOrFail(outcomes):
   return ((PASS in outcomes) and (FAIL in outcomes) and
           (not CRASH in outcomes) and (not OKAY in outcomes))

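Besides adding the FLAKY keyword to both status-file keyword tables, the module above re-purposes IsFlaky: the old predicate (a test that may pass or fail but must not crash) is renamed to IsPassOrFail, and IsFlaky now reports only an explicit FLAKY annotation. A minimal sketch of the two predicates on plain Python sets:

    PASS, FAIL, CRASH, OKAY, FLAKY = "PASS", "FAIL", "CRASH", "OKAY", "FLAKY"

    def IsFlaky(outcomes):
      # New meaning: explicitly annotated as flaky.
      return FLAKY in outcomes

    def IsPassOrFail(outcomes):
      # The old "IsFlaky": may pass or fail, but must not crash.
      return ((PASS in outcomes) and (FAIL in outcomes) and
              (not CRASH in outcomes) and (not OKAY in outcomes))

    assert IsFlaky({PASS, FLAKY})
    assert not IsFlaky({PASS, FAIL})  # pass-or-fail no longer implies flaky
    assert IsPassOrFail({PASS, FAIL})
    assert not IsPassOrFail({PASS, FAIL, CRASH})

The last hunk below updates PrintReport accordingly, so its nocrash counter keeps its old meaning via IsPassOrFail.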

@@ -66,6 +66,9 @@ class TestSuite(object):
 
   # Used in the status file and for stdout printing.
   def CommonTestName(self, testcase):
-    return testcase.path
+    if utils.IsWindows():
+      return testcase.path.replace("\\", "/")
+    else:
+      return testcase.path
 
   def ListTests(self, context):
@@ -84,32 +87,36 @@ class TestSuite(object):
   def ReadTestCases(self, context):
     self.tests = self.ListTests(context)
 
-  def FilterTestCasesByStatus(self, warn_unused_rules):
+  @staticmethod
+  def _FilterFlaky(flaky, mode):
+    return (mode == "run" and not flaky) or (mode == "skip" and flaky)
+
+  def FilterTestCasesByStatus(self, warn_unused_rules, flaky_tests="dontcare"):
     filtered = []
     used_rules = set()
     for t in self.tests:
+      flaky = False
       testname = self.CommonTestName(t)
-      if utils.IsWindows():
-        testname = testname.replace("\\", "/")
       if testname in self.rules:
         used_rules.add(testname)
-        outcomes = self.rules[testname]
-        t.outcomes = outcomes  # Even for skipped tests, as the TestCase
-                               # object stays around and PrintReport() uses it.
-        if statusfile.DoSkip(outcomes):
+        # Even for skipped tests, as the TestCase object stays around and
+        # PrintReport() uses it.
+        t.outcomes = self.rules[testname]
+        if statusfile.DoSkip(t.outcomes):
           continue  # Don't add skipped tests to |filtered|.
-      if len(self.wildcards) != 0:
-        skip = False
-        for rule in self.wildcards:
-          assert rule[-1] == '*'
-          if testname.startswith(rule[:-1]):
-            used_rules.add(rule)
-            outcomes = self.wildcards[rule]
-            t.outcomes = outcomes
-            if statusfile.DoSkip(outcomes):
-              skip = True
-              break  # "for rule in self.wildcards"
-        if skip: continue  # "for t in self.tests"
+        flaky = statusfile.IsFlaky(t.outcomes)
+      skip = False
+      for rule in self.wildcards:
+        assert rule[-1] == '*'
+        if testname.startswith(rule[:-1]):
+          used_rules.add(rule)
+          t.outcomes = self.wildcards[rule]
+          if statusfile.DoSkip(t.outcomes):
+            skip = True
+            break  # "for rule in self.wildcards"
+          flaky = flaky or statusfile.IsFlaky(t.outcomes)
+      if skip or self._FilterFlaky(flaky, flaky_tests):
+        continue  # "for t in self.tests"
       filtered.append(t)
     self.tests = filtered
 
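The flaky|non-flaky|all selection described in the commit message comes down to the _FilterFlaky predicate introduced in the hunk above: it returns True when a test should be dropped from the current run. Note that a test counts as flaky if either its exact status-file rule or any matching wildcard rule contains FLAKY. A standalone sketch of the truth table (same logic, illustrative lowercase name):

    def _filter_flaky(flaky, mode):
      # True means: drop this test from the current run.
      return (mode == "run" and not flaky) or (mode == "skip" and flaky)

    # --flaky-tests=run: keep only the flaky class.
    assert _filter_flaky(False, "run") and not _filter_flaky(True, "run")
    # --flaky-tests=skip: keep only the non-flaky class.
    assert _filter_flaky(True, "skip") and not _filter_flaky(False, "skip")
    # --flaky-tests=dontcare (the default): keep everything.
    assert not _filter_flaky(True, "dontcare")
    assert not _filter_flaky(False, "dontcare")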

@@ -54,7 +54,7 @@ def PrintReport(tests):
       skipped += 1
       continue
     if statusfile.TIMEOUT in o: timeout += 1
-    if statusfile.IsFlaky(o): nocrash += 1
+    if statusfile.IsPassOrFail(o): nocrash += 1
     if list(o) == [statusfile.PASS]: passes += 1
     if statusfile.IsFailOk(o): fail_ok += 1
     if list(o) == [statusfile.FAIL]: fail += 1