Commit 867c39f2 authored by Michal Majewski, committed by Commit Bot

[test] Remove duration from testcase.

Bug: v8:6917
Change-Id: Ifc3dac7a82fa67dda1f8b166fbd3f76a123ffba2
Reviewed-on: https://chromium-review.googlesource.com/824365
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Sergiy Byelozyorov <sergiyb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50122}
parent ce2a9e16
@@ -8,6 +8,7 @@ import os
 import subprocess
 import sys
 import threading
+import time
 
 from ..local import utils
 from ..objects import output
@@ -79,7 +80,9 @@ class Command(object):
         self.timeout, self._kill_process, [process, timeout_occured])
     timer.start()
 
+    start_time = time.time()
     stdout, stderr = process.communicate()
+    duration = time.time() - start_time
 
     timer.cancel()
@@ -89,6 +92,7 @@ class Command(object):
       stdout.decode('utf-8', 'replace').encode('utf-8'),
       stderr.decode('utf-8', 'replace').encode('utf-8'),
       process.pid,
+      duration
     )
 
   def _get_popen_args(self):
......
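The diff above times the child process around communicate(), so the measured duration covers the child's full lifetime, including a timeout kill. A minimal, self-contained sketch of that pattern (the function name run_timed and its arguments are illustrative, not from the diff):

import subprocess
import threading
import time

def run_timed(args, timeout):
  # Start the child, arm a kill timer, and take wall-clock time around
  # communicate(); duration includes the case where the timer fires.
  process = subprocess.Popen(
      args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  timer = threading.Timer(timeout, process.kill)
  timer.start()
  start_time = time.time()
  stdout, stderr = process.communicate()
  duration = time.time() - start_time
  timer.cancel()
  return stdout, stderr, duration

For example, run_timed(['sleep', '1'], timeout=5) would report a duration slightly above one second.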
@@ -31,7 +31,6 @@ import os
 import re
 import shutil
 import sys
-import time
 
 from . import command
 from . import perfdata
@@ -51,9 +50,13 @@ ProcessContext = collections.namedtuple(
     'process_context', ['sancov_dir'])
 
+TestJobResult = collections.namedtuple(
+    'TestJobResult', ['id', 'output'])
+
+
 def MakeProcessContext(sancov_dir):
   return ProcessContext(sancov_dir)
 
 
 # Global function for multiprocessing, because pickling a static method doesn't
 # work on Windows.
 def run_job(job, process_context):
@@ -102,11 +105,9 @@ class TestJob(Job):
       os.rename(sancov_file, new_sancov_file)
 
   def run(self, context):
-    start_time = time.time()
     out = self.cmd.execute()
-    duration = time.time() - start_time
     self._rename_coverage_data(out, context.sancov_dir)
-    return (self.test_id, out, duration)
+    return TestJobResult(self.test_id, out)
 
 
 class Runner(object):
@@ -120,9 +121,10 @@ class Runner(object):
     self.printed_allocations = False
     self.tests = [t for s in suites for t in s.tests]
-    # TODO(majeski): Pass outputs dynamically instead of keeping them in the
-    # runner.
+    # TODO(majeski): Pass dynamically instead of keeping them in the runner.
+    # Maybe some observer?
     self.outputs = {t: None for t in self.tests}
+
     self.suite_names = [s.name for s in suites]
 
     # Always pre-sort by status file, slowest tests first.
@@ -130,11 +132,10 @@ class Runner(object):
         t.suite.GetStatusFileOutcomes(t.name, t.variant))
     self.tests.sort(key=slow_key, reverse=True)
 
-    # Sort by stored duration of not opted out.
+    # Sort by stored duration if not opted out.
     if not context.no_sorting:
-      for t in self.tests:
-        t.duration = self.perfdata.FetchPerfData(t) or 1.0
-      self.tests.sort(key=lambda t: t.duration, reverse=True)
+      self.tests.sort(key=lambda t: self.perfdata.FetchPerfData(t) or 1.0,
+                      reverse=True)
 
     self._CommonInit(suites, progress_indicator, context)
@@ -160,7 +161,7 @@ class Runner(object):
       print("PerfData exception: %s" % e)
       self.perf_failures = True
 
-  def _MaybeRerun(self, pool, test):
+  def _MaybeRerun(self, pool, test, job_result):
     if test.run <= self.context.rerun_failures_count:
       # Possibly rerun this test if its run count is below the maximum per
       # test. <= as the flag controls reruns not including the first run.
@@ -172,26 +173,24 @@ class Runner(object):
       # Don't rerun this if the overall number of rerun tests has been
       # reached.
       return
-    if test.run >= 2 and test.duration > self.context.timeout / 20.0:
+    if (test.run >= 2 and
+        job_result.output.duration > self.context.timeout / 20.0):
       # Rerun slow tests at most once.
      return
 
     # Rerun this test.
-    test.duration = None
     test.run += 1
     pool.add([TestJob(test.id, test.cmd, test.run)])
     self.remaining += 1
     self.total += 1
 
-  def _ProcessTest(self, test, result, pool):
-    output = result[1]
-    self.outputs[test] = output
-    test.duration = result[2]
+  def _ProcessTest(self, test, job_result, pool):
+    self.outputs[test] = job_result.output
     has_unexpected_output = test.suite.HasUnexpectedOutput(
-        test, output, self.context)
+        test, job_result.output, self.context)
     if has_unexpected_output:
       self.failed.append(test)
-      if output.HasCrashed():
+      if job_result.output.HasCrashed():
        self.crashed += 1
     else:
       self.succeeded += 1
@@ -199,11 +198,12 @@ class Runner(object):
     # For the indicator, everything that happens after the first run is treated
     # as unexpected even if it flakily passes in order to include it in the
     # output.
-    self.indicator.HasRun(test, output, has_unexpected_output or test.run > 1)
+    self.indicator.HasRun(test, job_result.output,
+                          has_unexpected_output or test.run > 1)
     if has_unexpected_output:
       # Rerun test failures after the indicator has processed the results.
       self._VerbosePrint("Attempting to rerun test after failure.")
-      self._MaybeRerun(pool, test)
+      self._MaybeRerun(pool, test, job_result)
 
     # Update the perf database if the test succeeded.
     return not has_unexpected_output
@@ -243,10 +243,14 @@ class Runner(object):
           if result.heartbeat:
             self.indicator.Heartbeat()
            continue
-          test = test_map[result.value[0]]
-          update_perf = self._ProcessTest(test, result.value, pool)
+
+          job_result = result.value
+          test = test_map[job_result.id]
+
+          update_perf = self._ProcessTest(test, job_result, pool)
           if update_perf:
-            self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
+            self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
+                test, job_result.output.duration))
     finally:
       self._VerbosePrint("Closing process pool.")
       pool.terminate()
......
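The TestJobResult namedtuple replaces the anonymous (test_id, out, duration) tuple, so callers read named fields instead of magic indices, and the duration now travels inside the output object. A small sketch of the difference (FakeOutput is an illustrative stand-in, not the real Output class):

import collections

TestJobResult = collections.namedtuple('TestJobResult', ['id', 'output'])
# Illustrative stand-in for the real Output object from the diff.
FakeOutput = collections.namedtuple('FakeOutput', ['exit_code', 'duration'])

result = TestJobResult(id=42, output=FakeOutput(exit_code=0, duration=1.25))
assert result.id == 42                  # previously result[0]
assert result.output.duration == 1.25   # previously result[2]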
@@ -69,10 +69,10 @@ class PerfDataStore(object):
         return self.database[key].avg
     return None
 
-  def UpdatePerfData(self, test):
-    """Updates the persisted value in the store with test.duration."""
+  def UpdatePerfData(self, test, duration):
+    """Updates the persisted value in the store with duration."""
     testkey = test.get_id()
-    self.RawUpdatePerfData(testkey, test.duration)
+    self.RawUpdatePerfData(testkey, duration)
 
   def RawUpdatePerfData(self, testkey, duration):
     with self.lock:
@@ -116,7 +116,7 @@ class PerfDataManager(object):
 class NullPerfDataStore(object):
-  def UpdatePerfData(self, test):
+  def UpdatePerfData(self, test, duration):
     pass
 
   def FetchPerfData(self, test):
......
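With the test object no longer carrying a duration, the store receives it explicitly from the caller. A minimal sketch of the updated interface (MiniPerfDataStore is a stand-in; the real PerfDataStore persists running averages keyed by test.get_id(), as the .avg lookup above suggests):

import threading

class MiniPerfDataStore(object):
  # Stand-in for PerfDataStore; values here are plain floats rather than
  # persisted averages.
  def __init__(self):
    self.lock = threading.Lock()
    self.database = {}

  def UpdatePerfData(self, test, duration):
    # The duration now arrives as an argument instead of being read from
    # test.duration.
    self.RawUpdatePerfData(test.get_id(), duration)

  def RawUpdatePerfData(self, testkey, duration):
    with self.lock:
      self.database[testkey] = duration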
@@ -287,7 +287,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
     self.outputter.HasRunTest(
         test_name=str(test),
         test_cmd=test.cmd.to_string(relative=True),
-        test_duration=test.duration,
+        test_duration=output.duration,
         test_failure=fail_text)
@@ -313,20 +313,20 @@ class JsonTestProgressIndicator(ProgressIndicator):
     if self.tests:
       # Get duration mean.
       duration_mean = (
-          sum(t.duration for t in self.tests) / float(len(self.tests)))
+          sum(duration for (_, duration) in self.tests) /
+          float(len(self.tests)))
 
       # Sort tests by duration.
-      timed_tests = [t for t in self.tests if t.duration is not None]
-      timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+      self.tests.sort(key=lambda (_, duration): duration, reverse=True)
       slowest_tests = [
         {
           "name": str(test),
           "flags": test.cmd.args,
           "command": test.cmd.to_string(relative=True),
-          "duration": test.duration,
+          "duration": duration,
           "marked_slow": statusfile.IsSlow(
               test.suite.GetStatusFileOutcomes(test.name, test.variant)),
-        } for test in timed_tests[:20]
+        } for (test, duration) in self.tests[:20]
       ]
 
     complete_results.append({
@@ -343,7 +343,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
   def HasRun(self, test, output, has_unexpected_output):
     # Buffer all tests for sorting the durations in the end.
-    self.tests.append(test)
+    self.tests.append((test, output.duration))
     if not has_unexpected_output:
       # Omit tests that run as expected. Passing tests of reruns after failures
       # will have unexpected_output to be reported here has well.
@@ -359,7 +359,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
         "exit_code": output.exit_code,
         "result": test.suite.GetOutcome(test, output),
         "expected": test.suite.GetExpectedOutcomes(test),
-        "duration": test.duration,
+        "duration": output.duration,
         # TODO(machenbach): This stores only the global random seed from the
         # context and not possible overrides when using random-seed stress.
@@ -404,14 +404,14 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
       self.results[key] = {
         "actual": outcome,
         "expected": " ".join(expected_outcomes),
-        "times": [test.duration],
+        "times": [output.duration],
       }
       self.summary[outcome] = self.summary[outcome] + 1
     else:
       # This is a rerun and a previous result exists.
       result = self.results[key]
       result["actual"] = "%s %s" % (result["actual"], outcome)
-      result["times"].append(test.duration)
+      result["times"].append(output.duration)
 
 
 PROGRESS_INDICATORS = {
......
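One portability note: sort keys such as lambda (_, duration): duration rely on Python 2 tuple-parameter unpacking, which PEP 3113 removed in Python 3. An equivalent sketch that works in both versions:

import operator

tests = [('fast', 0.1), ('slow', 3.0), ('medium', 1.2)]
# Sort (test, duration) pairs by duration, slowest first, without unpacking
# tuples in the lambda signature.
tests.sort(key=operator.itemgetter(1), reverse=True)
assert [name for name, _ in tests] == ['slow', 'medium', 'fast']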
@@ -86,7 +86,6 @@ class TestSuite(object):
     self.tests = None  # list of TestCase objects
     self.rules = None  # {variant: {test name: [rule]}}
     self.prefix_rules = None  # {variant: {test name prefix: [rule]}}
-    self.total_duration = None  # float, assigned on demand
 
     self._outcomes_cache = dict()
......
@@ -86,16 +86,16 @@ def FormatTime(d):
   return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
 
 
-def PrintTestDurations(suites, overall_time):
+def PrintTestDurations(suites, outputs, overall_time):
   # Write the times to stderr to make it easy to separate from the
   # test output.
   print
   sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
-  timed_tests = [ t for s in suites for t in s.tests
-                  if t.duration is not None ]
-  timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
+  timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
+                 if t in outputs]
+  timed_tests.sort(key=lambda (_, duration): duration, reverse=True)
   index = 1
-  for test in timed_tests[:20]:
-    t = FormatTime(test.duration)
+  for test, duration in timed_tests[:20]:
+    t = FormatTime(duration)
     sys.stderr.write("%4i (%s) %s\n" % (index, t, test))
     index += 1
@@ -32,12 +32,13 @@ from ..local import utils
 class Output(object):
 
-  def __init__(self, exit_code, timed_out, stdout, stderr, pid):
+  def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
     self.exit_code = exit_code
     self.timed_out = timed_out
     self.stdout = stdout
     self.stderr = stderr
     self.pid = pid
+    self.duration = duration
 
   def HasCrashed(self):
     if utils.IsWindows():
......
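Attaching the duration to Output rather than TestCase means each run owns its own measurement: the TestCase field removed in the next hunk becomes unnecessary, and _MaybeRerun no longer has to reset test.duration to None before requeueing. A minimal sketch (this Output stand-in mirrors the patched constructor; the real class also has HasCrashed() and related helpers):

class Output(object):
  def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
    self.exit_code = exit_code
    self.timed_out = timed_out
    self.stdout = stdout
    self.stderr = stderr
    self.pid = pid
    self.duration = duration

first_run = Output(1, False, '', 'error', pid=100, duration=3.1)
rerun = Output(0, False, '', '', pid=101, duration=0.2)
# Two runs, two independent durations; nothing shared to reset between them.
assert (first_run.duration, rerun.duration) == (3.1, 0.2)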
@@ -46,7 +46,6 @@ class TestCase(object):
     self.variant_flags = []  # list of strings, flags specific to this test
     self.id = None  # int, used to map result back to TestCase instance
-    self.duration = None  # assigned during execution
     self.run = 1  # The nth time this test is executed.
     self.cmd = None
......
@@ -492,7 +492,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
       overall_duration = time.time() - start_time
 
       if options.time:
-        verbose.PrintTestDurations(suites, overall_duration)
+        verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
 
       if num_tests == 0:
         print("Warning: no tests were run!")
......
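The final hunk passes runner.outputs into PrintTestDurations, so durations are looked up in the runner's per-test output map rather than on the tests themselves. A simplified, self-contained version of that flow (the function and names below are illustrative):

import collections

Output = collections.namedtuple('Output', ['duration'])

def print_test_durations(outputs, limit=20):
  # outputs maps tests to Output objects; report the slowest first.
  timed = sorted(outputs.items(), key=lambda item: item[1].duration,
                 reverse=True)
  for index, (test, out) in enumerate(timed[:limit], 1):
    print("%4i (%.3fs) %s" % (index, out.duration, test))

print_test_durations({'test-a': Output(0.5), 'test-b': Output(2.0)})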