Commit af120db4 authored by Sergiy Byelozyorov, committed by Commit Bot

[tools] Correctly identify and report test crashes and infra failures

We define a TestFailedError exception and raise it when we can reliably detect
that a test has crashed. All other exceptions are treated as infra failures and
are captured by the try/except clause in the MainWrapper function.
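
For orientation, a minimal self-contained sketch of that split; the helper names,
the ['true'] fallback command and the infra exit code 2 are illustrative
assumptions rather than code from run_perf.py, and timeouts are treated the same
way as crashes in the real script:

import subprocess
import sys
import traceback


class TestFailedError(Exception):
  """Raised when a test has failed for a non-infra reason (crash/timeout)."""


def run_one(cmd):
  # A crash (non-zero exit code) is a test failure, not an infra failure.
  proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  stdout, _ = proc.communicate()
  if proc.returncode != 0:
    raise TestFailedError()
  return stdout


def main_wrapper(argv):
  try:
    run_one(argv or ['true'])
    return 0
  except TestFailedError:
    return 1  # Test failure: reported via the exit code, but not an infra issue.
  except Exception:
    # Everything else (e.g. OSError because a binary is missing) is treated
    # as an infra failure.
    traceback.print_exc()
    return 2  # Illustrative exit code, not taken from run_perf.py.


if __name__ == '__main__':
  sys.exit(main_wrapper(sys.argv[1:]))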

This also fixes all tests in run_perf_test.py and run_tests_test.py, and makes
sure that both are run on any change in the tools directory.

R=machenbach@chromium.org

Bug: chromium:899028
Change-Id: I283bc87b31c814be476bebe9fdda414975494183
Reviewed-on: https://chromium-review.googlesource.com/c/1303293
Commit-Queue: Sergiy Byelozyorov <sergiyb@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57134}
parent 6d9c30cd
@@ -43,3 +43,26 @@ wheel: <
platform: "win_amd64"
>
>
# Used by:
# tools/unittests/run_perf_test.py
wheel: <
name: "infra/python/wheels/coverage/${vpython_platform}"
version: "version:4.3.4"
>
wheel: <
name: "infra/python/wheels/six-py2_py3"
version: "version:1.10.0"
>
wheel: <
name: "infra/python/wheels/pbr-py2_py3"
version: "version:3.0.0"
>
wheel: <
name: "infra/python/wheels/funcsigs-py2_py3"
version: "version:1.0.2"
>
wheel: <
name: "infra/python/wheels/mock-py2_py3"
version: "version:2.0.0"
>
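
These wheels make coverage and mock available when the unittest runs under
vpython (six, pbr and funcsigs are pulled in as mock 2.0.0's dependencies). A
minimal sketch of the patch/stopall pattern the mock wheel enables; os.path.exists
is only a stand-in target here for the command classes patched further down in
this diff:

import os.path

from mock import MagicMock, patch  # Provided by the mock wheel above.

# Replace a real attribute with a mock for the duration of a test...
patch.object(os.path, 'exists', MagicMock(return_value=True)).start()
assert os.path.exists('/definitely/not/there')

# ...and undo all started patches afterwards, as run_perf_test.py's tearDown
# now does via patch.stopall().
patch.stopall()
assert not os.path.exists('/definitely/not/there')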
@@ -73,10 +73,10 @@ def _V8PresubmitChecks(input_api, output_api):
import sys
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import TorqueFormatProcessor
from presubmit import SourceProcessor
from presubmit import StatusFilesProcessor
from v8_presubmit import CppLintProcessor
from v8_presubmit import TorqueFormatProcessor
from v8_presubmit import SourceProcessor
from v8_presubmit import StatusFilesProcessor
def FilterFile(affected_file):
return input_api.FilterSourceFile(
@@ -138,6 +138,11 @@ def GeometricMean(values):
return str(math.exp(sum(map(math.log, values)) / len(values)))
class TestFailedError(Exception):
"""Error raised when a test has failed due to a non-infra issue."""
pass
class Results(object):
"""Place holder for result traces."""
def __init__(self, traces=None, errors=None):
@@ -692,7 +697,7 @@ class DesktopPlatform(Platform):
output = cmd.execute()
except OSError: # pragma: no cover
logging.exception(title % "OSError")
return ""
raise
logging.info(title % "Stdout" + "\n%s", output.stdout)
if output.stderr: # pragma: no cover
@@ -700,6 +705,10 @@ class DesktopPlatform(Platform):
logging.info(title % "Stderr" + "\n%s", output.stderr)
if output.timed_out:
logging.warning(">>> Test timed out after %ss.", runnable.timeout)
raise TestFailedError()
if output.exit_code != 0:
logging.warning(">>> Test crashed.")
raise TestFailedError()
if '--prof' in self.extra_flags:
os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
if os_prefix:
@@ -781,12 +790,13 @@ class AndroidPlatform(Platform): # pragma: no cover
logging.info(title % "Stdout" + "\n%s", stdout)
except android.CommandFailedException as e:
logging.info(title % "Stdout" + "\n%s", e.output)
raise
logging.warning('>>> Test crashed.')
raise TestFailedError()
except android.TimeoutException as e:
if e.output is not None:
logging.info(title % "Stdout" + "\n%s", e.output)
logging.warning(">>> Test timed out after %ss.", runnable.timeout)
stdout = ""
raise TestFailedError()
if runnable.process_size:
return stdout + "MaxMemory: Unsupported"
return stdout
@@ -1027,6 +1037,8 @@ def Main(args):
results = Results()
results_secondary = Results()
# We use list here to allow modification in nested function below.
have_failed_tests = [False]
with CustomMachineConfiguration(governor = options.cpu_governor,
disable_aslr = options.noaslr) as conf:
for path in args:
@@ -1065,7 +1077,10 @@ def Main(args):
for i in xrange(0, max(1, total_runs)):
# TODO(machenbach): Allow timeout per arch like with run_count per
# arch.
yield platform.Run(runnable, i)
try:
yield platform.Run(runnable, i)
except TestFailedError:
have_failed_tests[0] = True
# Let runnable iterate over all runs and handle output.
result, result_secondary = runnable.Run(
@@ -1084,7 +1099,7 @@ def Main(args):
else: # pragma: no cover
print results_secondary
if results.errors:
if results.errors or have_failed_tests[0]:
return 1
return 0
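
The single-element list above is the usual Python 2 workaround for updating an
outer variable from a nested function (Python 2 has no nonlocal). A tiny
self-contained illustration of the same trick:

def make_counter():
  count = [0]  # Mutable cell that the nested function can update in place.

  def bump():
    # `count = ...` here would create a new local instead of rebinding the
    # outer name, so we mutate the list element instead.
    count[0] += 1
    return count[0]

  return bump


bump = make_counter()
assert bump() == 1
assert bump() == 2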
@@ -5,5 +5,6 @@
def CheckChangeOnCommit(input_api, output_api):
# TODO(machenbach): Run all unittests.
tests = input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, '.', whitelist=['run_tests_test.py$'])
input_api, output_api, '.',
whitelist=['run_tests_test.py$', 'run_perf_test.py$'])
return input_api.RunTests(tests)
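
The whitelist entries are treated as patterns matched against the test file
names, so adding run_perf_test.py$ makes the presubmit pick up both unittests.
A rough illustration of the intended selection using plain re (the canned
check's own filtering may differ in detail):

import re

patterns = ['run_tests_test.py$', 'run_perf_test.py$']
files = ['run_perf_test.py', 'run_tests_test.py', 'testcfg.py']

selected = [f for f in files if any(re.search(p, f) for p in patterns)]
assert selected == ['run_perf_test.py', 'run_tests_test.py']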
@@ -85,7 +85,7 @@ V8_GENERIC_JSON = {
"units": "ms",
}
Output = namedtuple("Output", "stdout, stderr, timed_out")
Output = namedtuple("Output", "stdout, stderr, timed_out, exit_code")
class PerfTest(unittest.TestCase):
@classmethod
@@ -113,6 +113,7 @@ class PerfTest(unittest.TestCase):
os.makedirs(TEST_WORKSPACE)
def tearDown(self):
patch.stopall()
if path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
@@ -125,7 +126,8 @@ class PerfTest(unittest.TestCase):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
stderr=None,
timed_out=kwargs.get("timed_out", False))
timed_out=kwargs.get("timed_out", False),
exit_code=kwargs.get("exit_code", 0))
for arg in args[1]]
def create_cmd(*args, **kwargs):
cmd = MagicMock()
@@ -134,7 +136,9 @@ class PerfTest(unittest.TestCase):
cmd.execute = MagicMock(side_effect=execute)
return cmd
command.Command = MagicMock(side_effect=create_cmd)
patch.object(
run_perf.command, 'PosixCommand',
MagicMock(side_effect=create_cmd)).start()
# Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
@@ -402,6 +406,18 @@ class PerfTest(unittest.TestCase):
self._VerifyErrors(["Found non-numeric in test/Infra/Constant4"])
self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
def testOneRunCrashed(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(
["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"], exit_code=1)
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": [], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testOneRunTimingOut(self):
test_input = dict(V8_JSON)
test_input["timeout"] = 70
@@ -412,10 +428,7 @@ class PerfTest(unittest.TestCase):
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": [], "stddev": ""},
])
self._VerifyErrors([
"Regexp \"^Richards: (.+)$\" didn't match for test test/Richards.",
"Regexp \"^DeltaBlue: (.+)$\" didn't match for test test/DeltaBlue.",
])
self._VerifyErrors([])
self._VerifyMock(
path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
@@ -103,8 +103,7 @@ def run_tests(basedir, *args, **kwargs):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(
basedir=basedir).execute(sys_args)
code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
@@ -247,7 +246,8 @@ class SystemTest(unittest.TestCase):
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def check_cleaned_json_output(self, expected_results_name, actual_json):
def check_cleaned_json_output(
self, expected_results_name, actual_json, basedir):
# Check relevant properties of the json output.
with open(actual_json) as f:
json_output = json.load(f)[0]
@@ -260,6 +260,7 @@ class SystemTest(unittest.TestCase):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
data['command'] = data['command'].replace(basedir + '/', '')
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
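
Stripping the absolute base directory keeps the recorded command comparable
against the checked-in expected JSON no matter where the temporary checkout
lives. Roughly, with an invented path and flags:

basedir = '/tmp/workspaceXYZ/v8'  # Hypothetical temporary checkout location.
data = {'command': '/usr/bin/python ' + basedir + '/out/x64.release/d8 --flag test.js'}
data['command'] = data['command'].replace(basedir + '/', '')
assert data['command'] == '/usr/bin/python out/x64.release/d8 --flag test.js'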
@@ -310,7 +311,8 @@ class SystemTest(unittest.TestCase):
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results1.json', json_path)
self.check_cleaned_json_output(
'expected_test_results1.json', json_path, basedir)
def testFlakeWithRerunAndJSONProc(self):
self.testFlakeWithRerunAndJSON(infra_staging=True)
@@ -342,7 +344,8 @@ class SystemTest(unittest.TestCase):
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results2.json', json_path)
self.check_cleaned_json_output(
'expected_test_results2.json', json_path, basedir)
def testAutoDetect(self):
"""Fake a build with several auto-detected options.