Commit 325a56cd authored by Michael Achenbach's avatar Michael Achenbach Committed by Commit Bot

[test] Move predictable testing into a python wrapper

This moves the verify-predictable logic from the test runner into
a python wrapper script.

This revealed two more tests that don't print allocations, which are
now skipped.

Bug: v8:7166, v8:7177
Change-Id: Ie4a541cb2a20900414ffe1caf4b3fccc4a5edb52
Reviewed-on: https://chromium-review.googlesource.com/808971
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Sergiy Byelozyorov <sergiyb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49964}
parent 301d74ce
......@@ -638,6 +638,10 @@
# Skip tests that are known to be non-deterministic.
'd8/d8-worker-sharedarraybuffer': [SKIP],
'd8/d8-os': [SKIP],
# BUG(v8:7166).
'harmony/futex': [SKIP],
'd8/enable-tracing': [SKIP],
}], # 'predictable == True'
##############################################################################
......
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Wrapper script for verify-predictable mode. D8 is expected to be compiled with
v8_enable_verify_predictable.
The actual test command is expected to be passed to this wrapper as is. E.g.:
predictable_wrapper.py path/to/d8 --test --predictable --flag1 --flag2
The command is run up to three times and the printed allocation hash is
compared. Differences are reported as errors.
"""
import sys
from testrunner.local import command
MAX_TRIES = 3
def main(args):
  """Run the wrapped test command up to MAX_TRIES times and compare output.

  Args:
    args: The full test command line; args[0] is the executable, the rest
        are its arguments (including --verify-predictable).

  Returns:
    0 if the allocation output is identical on all runs,
    1 on a timeout in any run,
    2 if a run produced no allocation output,
    3 if the allocation output differed between runs.
  """
  def allocation_str(stdout):
    # Return the last '### Allocations = ...' line of stdout, or None if
    # no such line was printed.
    for line in reversed((stdout or '').splitlines()):
      if line.startswith('### Allocations = '):
        return line
    return None

  cmd = command.Command(args[0], args[1:])

  previous_allocations = None
  for run in range(1, MAX_TRIES + 1):
    # Parenthesized single-argument prints work identically on Python 2
    # and are valid Python 3.
    print('### Predictable run #%d' % run)
    output = cmd.execute()
    if output.stdout:
      print('### Stdout:')
      print(output.stdout)
    if output.stderr:
      print('### Stderr:')
      print(output.stderr)
    print('### Return code: %s' % output.exit_code)
    if output.HasTimedOut():
      # If we get a timeout in any run, we are in an unpredictable state. Just
      # report it as a failure and don't rerun.
      print('### Test timed out')
      return 1
    allocations = allocation_str(output.stdout)
    if not allocations:
      print('### Test had no allocation output. Ensure this is built '
            'with v8_enable_verify_predictable and that '
            '--verify-predictable is passed at the cmd line.')
      return 2
    if previous_allocations and previous_allocations != allocations:
      print('### Allocations differ')
      return 3
    if run >= MAX_TRIES:
      # No difference on the last run -> report a success.
      return 0
    previous_allocations = allocations
  # Unreachable: every loop iteration either returns or, on the last
  # iteration, hits the run >= MAX_TRIES success path.
  assert False
# Script entry point: forward everything after the script name (the wrapped
# d8 command line) to main() and propagate its exit code.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
......@@ -179,10 +179,10 @@ class Runner(object):
self.remaining += 1
self.total += 1
def _ProcessTestNormal(self, test, result, pool):
def _ProcessTest(self, test, result, pool):
test.output = result[1]
test.duration = result[2]
has_unexpected_output = test.suite.HasUnexpectedOutput(test)
has_unexpected_output = test.suite.HasUnexpectedOutput(test, self.context)
if has_unexpected_output:
self.failed.append(test)
if test.output.HasCrashed():
......@@ -201,49 +201,6 @@ class Runner(object):
# Update the perf database if the test succeeded.
return not has_unexpected_output
def _ProcessTestPredictable(self, test, result, pool):
  """Process one result in verify-predictable mode.

  Each test is run up to three times; the '### Allocations = ...' line of
  the outputs is compared across runs. A difference is reported as a
  failure, and a timeout on the first run fails immediately without rerun.
  Returns True so the perf database is always updated.
  """
  def HasDifferentAllocations(output1, output2):
    def AllocationStr(stdout):
      # Scan from the end of stdout: the allocation line is printed last.
      for line in reversed((stdout or "").splitlines()):
        if line.startswith("### Allocations = "):
          # Record that at least one test printed allocations; checked by
          # an assertion after the whole run.
          self.printed_allocations = True
          return line
      return ""
    return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))

  # Always pass the test duration for the database update.
  test.duration = result[2]
  if test.run == 1 and result[1].HasTimedOut():
    # If we get a timeout in the first run, we are already in an
    # unpredictable state. Just report it as a failure and don't rerun.
    test.output = result[1]
    self.remaining -= 1
    self.failed.append(test)
    self.indicator.HasRun(test, True)
  if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
    # From the second run on, check for different allocations. If a
    # difference is found, call the indicator twice to report both tests.
    # All runs of each test are counted as one for the statistic.
    self.remaining -= 1
    self.failed.append(test)
    self.indicator.HasRun(test, True)
    test.output = result[1]
    self.indicator.HasRun(test, True)
  elif test.run >= 3:
    # No difference on the third run -> report a success.
    self.remaining -= 1
    self.succeeded += 1
    test.output = result[1]
    self.indicator.HasRun(test, False)
  else:
    # No difference yet and less than three runs -> add another run and
    # remember the output for comparison.
    test.run += 1
    test.output = result[1]
    pool.add([TestJob(test.id, test.cmd, test.run)])
  # Always update the perf database.
  return True
def Run(self, jobs):
  # Public entry point: announce the start to the progress indicator, then
  # hand off to the internal runner loop with the given parallelism.
  self.indicator.Starting()
  self._RunInternal(jobs)
......@@ -281,10 +238,7 @@ class Runner(object):
self.indicator.Heartbeat()
continue
test = test_map[result.value[0]]
if self.context.predictable:
update_perf = self._ProcessTestPredictable(test, result.value, pool)
else:
update_perf = self._ProcessTestNormal(test, result.value, pool)
update_perf = self._ProcessTest(test, result.value, pool)
if update_perf:
self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
finally:
......@@ -300,14 +254,6 @@ class Runner(object):
if queued_exception[0]:
raise queued_exception[0]
# Make sure that any allocations were printed in predictable mode (if we
# ran any tests).
assert (
not self.total or
not self.context.predictable or
self.printed_allocations
)
def _VerbosePrint(self, text):
if self.context.verbose:
print text
......
......@@ -365,25 +365,31 @@ class TestSuite(object):
def IsNegativeTest(self, testcase):
  # Default: no test is a negative test. Subclasses override this for
  # suites where a failing execution is the expected outcome.
  return False
def HasFailed(self, testcase):
execution_failed = self.IsFailureOutput(testcase)
def HasFailed(self, testcase, ctx=None):
  """Return whether the test case counts as failed.

  In verify-predictable mode only the exit code of the predictable_wrapper
  is consulted; otherwise the suite-specific output check decides.
  Negative tests invert the verdict.
  """
  execution_failed = (
      testcase.output.exit_code != 0
      if ctx and ctx.predictable
      else self.IsFailureOutput(testcase))
  if self.IsNegativeTest(testcase):
    return not execution_failed
  return execution_failed
def GetOutcome(self, testcase):
def GetOutcome(self, testcase, ctx=None):
  """Map a finished test case to one of the status-file outcomes.

  Crash and timeout take precedence over the pass/fail decision of
  HasFailed(). The optional ctx enables the predictable-mode exit-code
  check inside HasFailed().
  """
  if testcase.output.HasCrashed():
    return statusfile.CRASH
  elif testcase.output.HasTimedOut():
    return statusfile.TIMEOUT
  # NOTE(review): the source span contained both the old bodyless
  # `elif self.HasFailed(testcase):` and the new line below (a diff
  # artifact); only the ctx-forwarding version is kept.
  elif self.HasFailed(testcase, ctx):
    return statusfile.FAIL
  else:
    return statusfile.PASS
def HasUnexpectedOutput(self, testcase):
return self.GetOutcome(testcase) not in self.GetExpectedOutcomes(testcase)
def HasUnexpectedOutput(self, testcase, ctx=None):
  # A test is unexpected iff its actual outcome is absent from the set of
  # outcomes the status file allows for it.
  actual = self.GetOutcome(testcase, ctx)
  return actual not in self.GetExpectedOutcomes(testcase)
def StripOutputForTransmit(self, testcase):
if not self.HasUnexpectedOutput(testcase):
......
......@@ -66,6 +66,9 @@ SLOW_ARCHS = ["arm",
"s390x",
"arm64"]
PREDICTABLE_WRAPPER = os.path.join(
base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
class StandardTestRunner(base_runner.BaseTestRunner):
def __init__(self):
......@@ -280,6 +283,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.extra_flags.append("--predictable")
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
# Add predictable wrapper to command prefix.
options.command_prefix.append(sys.executable)
options.command_prefix.append(PREDICTABLE_WRAPPER)
# TODO(machenbach): Figure out how to test a bigger subset of variants on
# msan.
......
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import unittest
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PREDICTABLE_WRAPPER = os.path.join(
TOOLS_DIR, 'predictable_wrapper.py')
PREDICTABLE_MOCKED = os.path.join(
TOOLS_DIR, 'unittests', 'testdata', 'predictable_mocked.py')
def call_wrapper(mode):
  """Invoke the predictable wrapper under test against a mocked d8.

  Instead of d8, python runs a mock script that expects two arguments:
  mode (one of 'equal', 'differ' or 'missing') and the path of a temporary
  state file used to simulate non-determinism between runs.

  Returns the wrapper's exit code.
  """
  handle, state_path = tempfile.mkstemp()
  os.close(handle)
  try:
    invocation = [
        sys.executable,
        PREDICTABLE_WRAPPER,
        sys.executable,
        PREDICTABLE_MOCKED,
        mode,
        state_path,
    ]
    child = subprocess.Popen(invocation, stdout=subprocess.PIPE)
    # Drain stdout and wait for termination before reading the exit code.
    child.communicate()
    return child.returncode
  finally:
    os.unlink(state_path)
class PredictableTest(unittest.TestCase):
  """Exercises predictable_wrapper.py end-to-end via the mocked d8."""

  def testEqualAllocationOutput(self):
    # Identical hashes on every run -> wrapper reports success (0).
    self.assertEqual(call_wrapper('equal'), 0)

  def testNoAllocationOutput(self):
    # No allocation line printed -> wrapper returns 2.
    self.assertEqual(call_wrapper('missing'), 2)

  def testDifferentAllocationOutput(self):
    # Hash changes between runs -> wrapper returns 3.
    self.assertEqual(call_wrapper('differ'), 3)
# Allow running this unittest file directly.
if __name__ == '__main__':
  unittest.main()
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys

# Mock replacement for a d8 binary built in verify-predictable mode.
# Expects exactly two arguments: a scenario name and the path of an
# initially-empty state file used to simulate non-determinism.
assert len(sys.argv) == 3

mode = sys.argv[1]
state_file = sys.argv[2]

if mode == 'equal':
  # 1. Scenario: print equal allocation hashes.
  print('### Allocations = 9497, hash = 0xc322c6b0')
elif mode == 'differ':
  # 2. Scenario: print different allocation hashes. This prints a different
  # hash on the second run, based on the content of a semaphore file. This
  # file is expected to be empty in the beginning.
  with open(state_file) as f:
    if f.read():
      print('### Allocations = 9497, hash = 0xc322c6b0')
    else:
      print('### Allocations = 9497, hash = 0xc322c6b1')
  with open(state_file, 'w') as f:
    f.write('something')
else:
  # 3. Scenario: missing allocation hashes. Don't print anything.
  # Actually validate the mode (the former `assert 'missing'` was a no-op,
  # asserting a truthy string literal).
  assert mode == 'missing'
sys.exit(0)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment