Commit e64f5461 authored by Michal Majewski, committed by Commit Bot

[test] Remove old code from run-tests

Only the test processors code is left. This made it possible to move more
logic into the base runner, such as progress indicator creation.

Bug: v8:6917
Change-Id: Ie6dd211cec561a07d92bcc4431ea88eb1842c8fa
Reviewed-on: https://chromium-review.googlesource.com/897624
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michał Majewski <majeski@google.com>
Cr-Commit-Position: refs/heads/master@{#51030}
parent 8622d899
@@ -21,6 +21,7 @@ sys.path.insert(
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc import progress
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.sigproc import SignalProc
@@ -152,6 +153,12 @@ MODES = {
),
}
PROGRESS_INDICATORS = {
'verbose': progress.VerboseProgressIndicator,
'dots': progress.DotsProgressIndicator,
'color': progress.ColorProgressIndicator,
'mono': progress.MonochromeProgressIndicator,
}
class TestRunnerError(Exception):
pass
@@ -276,12 +283,25 @@ class BaseTestRunner(object):
parser.add_option("--random-seed", default=0, type=int,
help="Default seed for initializing random generator")
# Progress
parser.add_option("-p", "--progress",
choices=PROGRESS_INDICATORS.keys(), default="mono",
help="The style of progress indicator (verbose, dots, "
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite", default="v8tests",
help="The testsuite name in the JUnit output file")
# Rerun
parser.add_option("--rerun-failures-count", default=0, type=int,
help="Number of times to rerun each failing test case. "
"Very slow tests will be rerun only once.")
parser.add_option("--rerun-failures-max", default=100, type=int,
help="Maximum number of failing test cases to rerun")
# Test config
parser.add_option("--command-prefix", default="",
help="Prepended to each shell command used to run a test")
parser.add_option("--extra-flags", action="append", default=[],
@@ -624,6 +644,18 @@ class BaseTestRunner(object):
return shard_run, shard_count
def _create_progress_indicators(self, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
if options.junitout:
procs.append(progress.JUnitTestProgressIndicator(options.junitout,
options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode))
return procs
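# Editor's note, a hedged usage sketch (mirroring the callers further down
# in this CL): the returned list is spliced into the test-processor chain,
# e.g.
#
#   indicators = self._create_progress_indicators(options)
#   procs = [loader, results] + indicators + [execproc]
#
# so every configured indicator observes each result flowing through it.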
def _create_timeout_proc(self, options):
if not options.total_timeout_sec:
return None
......
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import os
import re
import shutil
import sys
import traceback
from . import command
from . import perfdata
from . import statusfile
from . import utils
from . pool import Pool
from ..objects import predictable
# Base dir of the v8 checkout.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
TEST_DIR = os.path.join(BASE_DIR, "test")
# Structure that keeps global information per worker process.
ProcessContext = collections.namedtuple(
'process_context', ['sancov_dir'])
TestJobResult = collections.namedtuple(
'TestJobResult', ['id', 'outproc_result'])
def MakeProcessContext(sancov_dir):
return ProcessContext(sancov_dir)
# Global function for multiprocessing, because pickling a static method doesn't
# work on Windows.
def run_job(job, process_context):
return job.run(process_context)
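# Editor's sketch, not part of this CL: on Windows, multiprocessing spawns
# fresh interpreters and pickles the callable handed to each worker, so only
# module-level functions like run_job survive the round trip. A minimal
# standard-library illustration of the constraint:
#
#   import multiprocessing
#
#   def double(x):                        # module-level, hence picklable
#     return 2 * x
#
#   if __name__ == '__main__':
#     pool = multiprocessing.Pool(2)
#     print pool.map(double, [1, 2, 3])   # prints [2, 4, 6]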
class Job(object):
"""Stores data to be sent over the multi-process boundary.
All contained fields will be pickled/unpickled.
"""
def run(self, process_context):
raise NotImplementedError()
class TestJob(Job):
def __init__(self, test_id, cmd, outproc, run_num):
self.test_id = test_id
self.cmd = cmd
self.outproc = outproc
self.run_num = run_num
def _rename_coverage_data(self, out, sancov_dir):
"""Rename coverage data.
Rename files with PIDs to files with unique test IDs, because the number
of tests might be higher than pid_max. E.g.:
d8.1234.sancov -> d8.test.42.1.sancov, where 1234 was the process' PID,
42 is the test ID and 1 is the attempt (the same test might be rerun on
failures).
"""
if sancov_dir and out.pid is not None:
# Doesn't work on windows so basename is sufficient to get the shell name.
shell = os.path.basename(self.cmd.shell)
sancov_file = os.path.join(sancov_dir, "%s.%d.sancov" % (shell, out.pid))
# Some tests are expected to fail and don't produce coverage data.
if os.path.exists(sancov_file):
parts = sancov_file.split(".")
new_sancov_file = ".".join(
parts[:-2] +
["test", str(self.test_id), str(self.run_num)] +
parts[-1:]
)
assert not os.path.exists(new_sancov_file)
os.rename(sancov_file, new_sancov_file)
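# Editor's trace (hedged, assumed values) of the rename above for
# sancov_dir "/cov", shell "d8", pid 1234, test_id 42, run_num 1:
#
#   "/cov/d8.1234.sancov".split(".")  -> ["/cov/d8", "1234", "sancov"]
#   parts[:-2] + ["test", "42", "1"] + parts[-1:]
#     joined with "."                 -> "/cov/d8.test.42.1.sancov"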
def run(self, context):
output = self.cmd.execute()
self._rename_coverage_data(output, context.sancov_dir)
return TestJobResult(self.test_id, self.outproc.process(output))
class Runner(object):
def __init__(self, suites, progress_indicator, context, outproc_factory=None):
self.datapath = os.path.join("out", "testrunner_data")
self.perf_data_manager = perfdata.GetPerfDataManager(
context, self.datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
self.outproc_factory = outproc_factory or (lambda test: test.output_proc)
self.tests = [t for s in suites for t in s.tests]
# TODO(majeski): Pass dynamically instead of keeping them in the runner.
# Maybe some observer?
self.outputs = {t: None for t in self.tests}
self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
self.tests.sort(key=lambda t: t.is_slow, reverse=True)
# Sort by stored duration if not opted out.
if not context.no_sorting:
self.tests.sort(key=lambda t: self.perfdata.FetchPerfData(t) or 1.0,
reverse=True)
self._CommonInit(suites, progress_indicator, context)
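# Editor's note (hedged): both sorts above rely on list.sort() being stable.
# The later sort by stored duration dominates, while the earlier is_slow
# order survives as a tiebreak for tests with equal or missing perf data,
# since missing entries all fall back to the same 1.0 duration via
# "FetchPerfData(t) or 1.0".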
def _CommonInit(self, suites, progress_indicator, context):
self.total = 0
for s in suites:
for t in s.tests:
t.id = self.total
self.total += 1
self.indicator = progress_indicator
progress_indicator.SetRunner(self)
self.context = context
self.succeeded = 0
self.remaining = self.total
self.failed = []
self.crashed = 0
self.reran_tests = 0
def _RunPerfSafe(self, fun):
try:
fun()
except Exception, e:
print("PerfData exception: %s" % e)
self.perf_failures = True
def _MaybeRerun(self, pool, test, result):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
# test. <= as the flag controls reruns not including the first run.
if test.run == 1:
# Count the overall number of reran tests on the first rerun.
if self.reran_tests < self.context.rerun_failures_max:
self.reran_tests += 1
else:
# Don't rerun this if the overall number of rerun tests has been
# reached.
return
if (test.run >= 2 and
result.output.duration > self.context.timeout / 20.0):
# Rerun slow tests at most once.
return
# Rerun this test.
test.run += 1
pool.add([
TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
])
self.remaining += 1
self.total += 1
def _ProcessTest(self, test, result, pool):
self.outputs[test] = result.output
has_unexpected_output = result.has_unexpected_output
if has_unexpected_output:
self.failed.append(test)
if result.output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
# For the indicator, everything that happens after the first run is treated
# as unexpected even if it flakily passes in order to include it in the
# output.
self.indicator.HasRun(test, result.output,
has_unexpected_output or test.run > 1)
if has_unexpected_output:
# Rerun test failures after the indicator has processed the results.
self._VerbosePrint("Attempting to rerun test after failure.")
self._MaybeRerun(pool, test, result)
# Update the perf database if the test succeeded.
return not has_unexpected_output
def Run(self, jobs):
self.indicator.Starting()
self._RunInternal(jobs)
self.indicator.Done()
if self.failed:
return 1
elif self.remaining:
return 2
return 0
def _RunInternal(self, jobs):
pool = Pool(jobs)
test_map = {}
queued_exception = [None]
def gen_tests():
for test in self.tests:
assert test.id >= 0
test_map[test.id] = test
try:
yield [
TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
]
except Exception, e:
# If this failed, save the exception and re-raise it later (after
# all other tests have had a chance to run).
queued_exception[0] = e, traceback.format_exc()
continue
try:
it = pool.imap_unordered(
fn=run_job,
gen=gen_tests(),
process_context_fn=MakeProcessContext,
process_context_args=[self.context.sancov_dir],
)
for result in it:
if result.heartbeat:
self.indicator.Heartbeat()
continue
job_result = result.value
test_id = job_result.id
outproc_result = job_result.outproc_result
test = test_map[test_id]
update_perf = self._ProcessTest(test, outproc_result, pool)
if update_perf:
self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
test, outproc_result.output.duration))
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
raise
finally:
self._VerbosePrint("Closing process pool.")
pool.terminate()
self._VerbosePrint("Closing database connection.")
self._RunPerfSafe(self.perf_data_manager.close)
if self.perf_failures:
# Nuke perf data in case of failures. This might not work on windows as
# some files might still be open.
print "Deleting perf test data due to db corruption."
shutil.rmtree(self.datapath)
if queued_exception[0]:
e, stacktrace = queued_exception[0]
print stacktrace
raise e
def _VerbosePrint(self, text):
if self.context.verbose:
print text
sys.stdout.flush()
class BreakNowException(Exception):
def __init__(self, value):
super(BreakNowException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shelve
import threading
class PerfDataEntry(object):
def __init__(self):
self.avg = 0.0
self.count = 0
def AddResult(self, result):
kLearnRateLimiter = 99 # Greater value means slower learning.
# We use an approximation of the average of the last 100 results here:
# The existing average is weighted with kLearnRateLimiter (or less
# if there are fewer data points).
effective_count = min(self.count, kLearnRateLimiter)
self.avg = self.avg * effective_count + result
self.count = effective_count + 1
self.avg /= self.count
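# Editor's sketch with assumed numbers: the update above is
#   avg' = (avg * min(count, 99) + result) / (min(count, 99) + 1),
# a running mean that, once 100 results are in, decays like an exponential
# moving average with weight 1/100 on the newest result. For example:
#
#   entry = PerfDataEntry()
#   entry.avg, entry.count = 2.0, 99
#   entry.AddResult(4.0)
#   assert abs(entry.avg - 2.02) < 1e-9   # (2.0 * 99 + 4.0) / 100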
class PerfDataStore(object):
def __init__(self, datadir, arch, mode):
filename = os.path.join(datadir, "%s.%s.perfdata" % (arch, mode))
self.database = shelve.open(filename, protocol=2)
self.closed = False
self.lock = threading.Lock()
def __del__(self):
self.close()
def close(self):
if self.closed: return
self.database.close()
self.closed = True
def FetchPerfData(self, test):
"""Returns the observed duration for |test| as read from the store."""
key = test.get_id()
if key in self.database:
return self.database[key].avg
return None
def UpdatePerfData(self, test, duration):
"""Updates the persisted value in the store with duration."""
testkey = test.get_id()
self.RawUpdatePerfData(testkey, duration)
def RawUpdatePerfData(self, testkey, duration):
with self.lock:
if testkey in self.database:
entry = self.database[testkey]
else:
entry = PerfDataEntry()
entry.AddResult(duration)
self.database[testkey] = entry
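# Editor's note: the re-assignment above is required because shelve opened
# without writeback=True does not persist in-place mutations of stored
# objects; the entry must be written back explicitly. The lock serializes
# this read-modify-write against concurrent threads.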
class PerfDataManager(object):
def __init__(self, datadir):
self.datadir = os.path.abspath(datadir)
if not os.path.exists(self.datadir):
os.makedirs(self.datadir)
self.stores = {} # Keyed by arch, then mode.
self.closed = False
self.lock = threading.Lock()
def __del__(self):
self.close()
def close(self):
if self.closed: return
for arch in self.stores:
modes = self.stores[arch]
for mode in modes:
store = modes[mode]
store.close()
self.closed = True
def GetStore(self, arch, mode):
with self.lock:
if not arch in self.stores:
self.stores[arch] = {}
modes = self.stores[arch]
if not mode in modes:
modes[mode] = PerfDataStore(self.datadir, arch, mode)
return modes[mode]
class NullPerfDataStore(object):
def UpdatePerfData(self, test, duration):
pass
def FetchPerfData(self, test):
return None
class NullPerfDataManager(object):
def __init__(self):
pass
def GetStore(self, *args, **kwargs):
return NullPerfDataStore()
def close(self):
pass
def GetPerfDataManager(context, datadir):
if context.use_perf_data:
return PerfDataManager(datadir)
else:
return NullPerfDataManager()
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from functools import wraps
import json
import os
import sys
import time
from . import junit_output
from . import statusfile
from ..testproc import progress as progress_proc
class ProgressIndicator(object):
def __init__(self):
self.runner = None
def SetRunner(self, runner):
self.runner = runner
def Starting(self):
pass
def Done(self):
pass
def HasRun(self, test, output, has_unexpected_output):
pass
def Heartbeat(self):
pass
def PrintFailureHeader(self, test):
if test.output_proc.negative:
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test,
'negative': negative_marker,
}
def ToProgressIndicatorProc(self):
print ('Warning: %s is not available as a processor' %
self.__class__.__name__)
return None
class IndicatorNotifier(object):
"""Holds a list of progress indicators and notifies them all on events."""
def __init__(self):
self.indicators = []
def Register(self, indicator):
self.indicators.append(indicator)
def ToProgressIndicatorProcs(self):
return [i.ToProgressIndicatorProc() for i in self.indicators]
# Forge all generic event-dispatching methods in IndicatorNotifier, which are
# part of the ProgressIndicator interface.
for func_name in ProgressIndicator.__dict__:
func = getattr(ProgressIndicator, func_name)
if callable(func) and not func.__name__.startswith('_'):
def wrap_functor(f):
@wraps(f)
def functor(self, *args, **kwargs):
"""Generic event dispatcher."""
for indicator in self.indicators:
getattr(indicator, f.__name__)(*args, **kwargs)
return functor
setattr(IndicatorNotifier, func_name, wrap_functor(func))
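# Editor's sketch (hedged): after the loop above, IndicatorNotifier carries
# every public ProgressIndicator method and fans each call out to all
# registered indicators:
#
#   notifier = IndicatorNotifier()
#   notifier.Register(VerboseProgressIndicator())
#   notifier.Register(DotsProgressIndicator())
#   notifier.Heartbeat()   # forwarded to both indicators' Heartbeat()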
class SimpleProgressIndicator(ProgressIndicator):
"""Abstract base class for {Verbose,Dots}ProgressIndicator"""
def Starting(self):
print 'Running %i tests' % self.runner.total
def Done(self):
print
for failed in self.runner.failed:
output = self.runner.outputs[failed]
self.PrintFailureHeader(failed)
if output.stderr:
print "--- stderr ---"
print output.stderr.strip()
if output.stdout:
print "--- stdout ---"
print output.stdout.strip()
print "Command: %s" % failed.cmd.to_string()
if output.HasCrashed():
print "exit code: %d" % output.exit_code
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.runner.failed)
if self.runner.crashed > 0:
print "=== %i tests CRASHED" % self.runner.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def HasRun(self, test, output, has_unexpected_output):
if has_unexpected_output:
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (test, outcome)
sys.stdout.flush()
def Heartbeat(self):
print 'Still working...'
sys.stdout.flush()
def ToProgressIndicatorProc(self):
return progress_proc.VerboseProgressIndicator()
class DotsProgressIndicator(SimpleProgressIndicator):
def HasRun(self, test, output, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
def ToProgressIndicatorProc(self):
return progress_proc.DotsProgressIndicator()
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
def __init__(self, templates):
super(CompactProgressIndicator, self).__init__()
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Done(self):
self.PrintProgress('Done')
print "" # Line break.
def HasRun(self, test, output, has_unexpected_output):
self.PrintProgress(str(test))
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
stdout = output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % test.cmd.to_string()
if output.HasCrashed():
print "exit code: %d" % output.exit_code
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
if length and (len(string) > (length - 3)):
return string[:(length - 3)] + "..."
else:
return string
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
progress = 0 if not self.runner.total else (
((self.runner.total - self.runner.remaining) * 100) //
self.runner.total)
status = self.templates['status_line'] % {
'passed': self.runner.succeeded,
'progress': progress,
'failed': len(self.runner.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
"\033[34m%%%(progress) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
def ToProgressIndicatorProc(self):
return progress_proc.ColorProgressIndicator()
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
}
super(MonochromeProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
def ToProgressIndicatorProc(self):
return progress_proc.MonochromeProgressIndicator()
class JUnitTestProgressIndicator(ProgressIndicator):
def __init__(self, junitout, junittestsuite):
super(JUnitTestProgressIndicator, self).__init__()
self.junitout = junitout
self.junittestsuite = junittestsuite
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
else:
self.outfile = sys.stdout
def Done(self):
self.outputter.FinishAndWrite(self.outfile)
if self.outfile != sys.stdout:
self.outfile.close()
def HasRun(self, test, output, has_unexpected_output):
fail_text = ""
if has_unexpected_output:
stdout = output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
stderr = output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
fail_text += "Command: %s" % test.cmd.to_string()
if output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
if output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
test_name=str(test),
test_cmd=test.cmd.to_string(relative=True),
test_duration=output.duration,
test_failure=fail_text)
def ToProgressIndicatorProc(self):
if self.outfile != sys.stdout:
self.outfile.close()
return progress_proc.JUnitTestProgressIndicator(self.junitout,
self.junittestsuite)
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.results = []
self.tests = []
def ToProgressIndicatorProc(self):
return progress_proc.JsonTestProgressIndicator(
self.json_test_results, self.arch, self.mode)
def Done(self):
complete_results = []
if os.path.exists(self.json_test_results):
with open(self.json_test_results, "r") as f:
# Buildbot might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
duration_mean = None
if self.tests:
# Get duration mean.
duration_mean = (
sum(duration for (_, duration) in self.tests) /
float(len(self.tests)))
# Sort tests by duration.
self.tests.sort(key=lambda (_, duration): duration, reverse=True)
slowest_tests = [
{
"name": str(test),
"flags": test.cmd.args,
"command": test.cmd.to_string(relative=True),
"duration": duration,
"marked_slow": test.is_slow,
} for (test, duration) in self.tests[:20]
]
complete_results.append({
"arch": self.arch,
"mode": self.mode,
"results": self.results,
"slowest_tests": slowest_tests,
"duration_mean": duration_mean,
"test_total": len(self.tests),
})
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
def HasRun(self, test, output, has_unexpected_output):
# Buffer all tests for sorting the durations in the end.
self.tests.append((test, output.duration))
if not has_unexpected_output:
# Omit tests that ran as expected. Passing tests from reruns after failures
# still have unexpected_output and are reported here as well.
return
random_seed = None
for i, flag in enumerate(reversed(test.cmd.args)):
if 'random-seed' in flag:
if '=' in flag:
random_seed = flag.split('=')[1]
break
elif i > 0:
random_seed = test.cmd.args[i - 1]
break
self.results.append({
"name": str(test),
"flags": test.cmd.args,
"command": test.cmd.to_string(relative=True),
"run": test.run,
"stdout": output.stdout,
"stderr": output.stderr,
"exit_code": output.exit_code,
"result": test.output_proc.get_outcome(output),
"expected": test.expected_outcomes,
"duration": output.duration,
"random_seed": int(random_seed),
"target_name": test.get_shell(),
"variant": test.variant,
})
class FlakinessTestProgressIndicator(ProgressIndicator):
def __init__(self, json_test_results):
super(FlakinessTestProgressIndicator, self).__init__()
self.json_test_results = json_test_results
self.results = {}
self.summary = {
"PASS": 0,
"FAIL": 0,
"CRASH": 0,
"TIMEOUT": 0,
}
self.seconds_since_epoch = time.time()
def Done(self):
with open(self.json_test_results, "w") as f:
json.dump({
"interrupted": False,
"num_failures_by_type": self.summary,
"path_delimiter": "/",
"seconds_since_epoch": self.seconds_since_epoch,
"tests": self.results,
"version": 3,
}, f)
def HasRun(self, test, output, has_unexpected_output):
key = test.get_id()
outcome = test.output_proc.get_outcome(output)
assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
if test.run == 1:
# First run of this test.
self.results[key] = {
"actual": outcome,
"expected": " ".join(test.expected_outcomes),
"times": [output.duration],
}
self.summary[outcome] = self.summary[outcome] + 1
else:
# This is a rerun and a previous result exists.
result = self.results[key]
result["actual"] = "%s %s" % (result["actual"], outcome)
result["times"].append(output.duration)
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
@@ -12,7 +12,6 @@ import sys
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
from testrunner.local import progress
from testrunner.local import utils
from testrunner.testproc import fuzzer
@@ -36,13 +35,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
parser.add_option("--dump-results-file", help="Dump maximum limit reached")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
parser.add_option("--fuzzer-random-seed", default=0,
help="Default seed for initializing fuzzer random "
"generator")
@@ -130,14 +122,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
self._setup_suites(options, suites)
tests = self._load_tests(options, suites)
progress_indicator = progress.IndicatorNotifier()
progress_indicator.Register(
progress.PROGRESS_INDICATORS[options.progress]())
if options.json_test_results:
progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode))
loader = LoadProc()
fuzzer_rng = random.Random(options.fuzzer_random_seed)
@@ -145,7 +129,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
combiner = self._create_combiner(fuzzer_rng, options)
results = ResultsTracker()
execproc = ExecutionProc(options.j)
indicators = progress_indicator.ToProgressIndicatorProcs()
indicators = self._create_progress_indicators(options)
procs = [
loader,
NameFilterProc(args) if args else None,
......
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, no_sorting,
rerun_failures_count, rerun_failures_max, no_harness,
use_perf_data, sancov_dir):
# Used by perfdata
self.arch = arch
self.mode = mode
self.no_sorting = no_sorting
self.use_perf_data = use_perf_data
# Used by testcase to create command
self.command_prefix = command_prefix
self.extra_flags = extra_flags
self.isolates = isolates
self.mode_flags = mode_flags
self.no_harness = no_harness
self.noi18n = noi18n
self.shell_dir = shell_dir
self.timeout = timeout
self.verbose = verbose
# Will be deprecated after moving to test processors
self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max
self.sancov_dir = sancov_dir
@@ -5,33 +5,21 @@
# found in the LICENSE file.
from collections import OrderedDict
from os.path import join
import multiprocessing
import os
import random
import re
import subprocess
import sys
import time
# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
from testrunner.objects import predictable
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import (VerboseProgressIndicator,
ResultsTracker,
TestsCounter)
from testrunner.testproc.progress import ResultsTracker, TestsCounter
from testrunner.testproc.seed import SeedProc
from testrunner.testproc.variant import VariantProc
from testrunner.utils import random_utils
@@ -70,20 +58,6 @@ RANDOM_GC_STRESS_FLAGS = ["--random-gc-interval=5000",
PREDICTABLE_WRAPPER = os.path.join(
base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
# Staging default. When set to True it overwrites the two options below.
USE_STAGING = True
# Specifies which builders should use the staging test-runner.
# Mapping from mastername to list of buildernames. Buildernames can be strings
# or compiled regexps which will be matched.
BUILDER_WHITELIST_STAGING = {
}
_RE_TYPE = type(re.compile(''))
# Specifies which architectures are whitelisted to use the staging test-runner.
# List of arch strings, e.g. "x64".
ARCH_WHITELIST_STAGING = [
]
class StandardTestRunner(base_runner.BaseTestRunner):
def __init__(self, *args, **kwargs):
@@ -153,18 +127,11 @@ class StandardTestRunner(base_runner.BaseTestRunner):
default=False, action="store_true",
help="Deprecated. "
"Equivalent to passing --variants=exhaustive")
parser.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(),
default="mono")
parser.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow tests)"))
parser.add_option("--report", help="Print a summary of the tests to be"
" run",
default=False, action="store_true")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--flakiness-results",
help="Path to a file for storing flakiness json.")
parser.add_option("--dont-skip-slow-simulator-tests",
@@ -179,34 +146,12 @@ class StandardTestRunner(base_runner.BaseTestRunner):
default=False, action="store_true")
parser.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
parser.add_option("--junitout", help="File name of the JUnit output")
parser.add_option("--junittestsuite",
help="The testsuite name in the JUnit output file",
default="v8tests")
parser.add_option("--random-seed-stress-count", default=1, type="int",
dest="random_seed_stress_count",
help="Number of runs with different random seeds. Only "
"with test processors: 0 means infinite "
"generation.")
def _use_staging(self, options):
if options.infra_staging is not None:
# True or False are used to explicitly opt in or out.
return options.infra_staging
if USE_STAGING:
return True
builder_configs = BUILDER_WHITELIST_STAGING.get(options.mastername, [])
for builder_config in builder_configs:
if (isinstance(builder_config, _RE_TYPE) and
builder_config.match(options.buildername)):
return True
if builder_config == options.buildername:
return True
for arch in ARCH_WHITELIST_STAGING:
if self.build_config.arch == arch:
return True
return False
def _process_options(self, options):
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
@@ -268,10 +213,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if options.variants == "infra_staging":
options.variants = "exhaustive"
options.infra_staging = True
# Use staging on whitelisted masters/builders.
options.infra_staging = self._use_staging(options)
self._variants = self._parse_variants(options.variants)
@@ -320,25 +261,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
def _execute(self, args, options, suites):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
# Populate context object.
ctx = context.Context(self.build_config.arch,
self.mode_options.execution_mode,
self.outdir,
self.mode_options.flags,
options.verbose,
options.timeout *
self._timeout_scalefactor(options),
options.isolates,
options.command_prefix,
options.extra_flags,
self.build_config.no_i18n,
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
options.no_harness,
use_perf_data=not options.swarming,
sancov_dir=self.sancov_dir)
# simd_mips is true if SIMD is fully supported on MIPS
simd_mips = (
self.build_config.arch in [ 'mipsel', 'mips', 'mips64', 'mips64el'] and
@@ -381,147 +303,20 @@
"ubsan_vptr": self.build_config.ubsan_vptr,
}
progress_indicator = progress.IndicatorNotifier()
progress_indicator.Register(
progress.PROGRESS_INDICATORS[options.progress]())
if options.junitout: # pragma: no cover
progress_indicator.Register(progress.JUnitTestProgressIndicator(
options.junitout, options.junittestsuite))
if options.json_test_results:
progress_indicator.Register(progress.JsonTestProgressIndicator(
options.json_test_results,
self.build_config.arch,
self.mode_options.execution_mode))
if options.flakiness_results: # pragma: no cover
progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results))
if True:
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases()
return self._run_test_procs(suites, args, options, progress_indicator)
all_tests = []
num_tests = 0
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases()
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
# First filtering by status applying the generic rules (tests without
# variants)
if options.warn_unused:
tests = [(t.name, t.variant) for t in s.tests]
s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
variant_gen = s.CreateLegacyVariantsGenerator(self._variants)
variant_tests = [ t.create_variant(v, flags)
for t in s.tests
for v in variant_gen.FilterVariantsByTest(t)
for flags in variant_gen.GetFlagSets(t, v) ]
# Duplicate test for random seed stress mode.
def iter_seed_flags():
for _ in range(0, options.random_seed_stress_count or 1):
# Use given random seed for all runs (set by default in
# execution.py) or a new random seed if none is specified.
if options.random_seed:
yield options.random_seed
else:
yield random_utils.random_seed()
s.tests = [
t.create_variant(t.variant, [], 'seed-%d' % n, random_seed=val)
for t in variant_tests
for n, val in enumerate(iter_seed_flags())
]
# Second filtering by status applying also the variant-dependent rules.
if options.warn_unused:
tests = [(t.name, t.variant) for t in s.tests]
s.statusfile.warn_unused_rules(tests, check_variant_rules=True)
s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
s.tests = self._shard_tests(s.tests, options)
for t in s.tests:
t.cmd = t.get_command()
num_tests += len(s.tests)
if options.cat:
return 0 # We're done here.
if options.report:
verbose.PrintReport(all_tests)
# Run the tests.
start_time = time.time()
if self.build_config.predictable:
outproc_factory = predictable.get_outproc
else:
outproc_factory = None
runner = execution.Runner(suites, progress_indicator, ctx,
outproc_factory)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time
if options.time:
verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
if num_tests == 0:
exit_code = 3
print("Warning: no tests were run!")
if exit_code == 1 and options.json_test_results:
print("Force exit code 0 after failures. Json test results file "
"generated with failure information.")
exit_code = 0
if self.sancov_dir:
# If tests ran with sanitizer coverage, merge coverage files in the end.
try:
print "Merging sancov files."
subprocess.check_call([
sys.executable,
join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
"--coverage-dir=%s" % self.sancov_dir])
except:
print >> sys.stderr, "Error: Merging sancov files failed."
exit_code = 1
return exit_code
def _shard_tests(self, tests, options):
shard_run, shard_count = self._get_shard_info(options)
if shard_count < 2:
return tests
count = 0
shard = []
for test in tests:
if count % shard_count == shard_run - 1:
shard.append(test)
count += 1
return shard
return self._run_test_procs(suites, args, options)
def _run_test_procs(self, suites, args, options, progress_indicator):
def _run_test_procs(self, suites, args, options):
jobs = options.j
print '>>> Running with test processors'
loader = LoadProc()
tests_counter = TestsCounter()
results = ResultsTracker()
indicators = progress_indicator.ToProgressIndicatorProcs()
indicators = self._create_progress_indicators(options)
outproc_factory = None
if self.build_config.predictable:
......