Commit 9e69733b authored by Michal Majewski, committed by Commit Bot

[test] Move shard methods to the base runner

Bug: v8:6917
Change-Id: I0b81ebfe289b459e30ad85a4a62ed244cbd20b65
Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/870123
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michał Majewski <majeski@google.com>
Cr-Commit-Position: refs/heads/master@{#50657}
parent 45833d9b
@@ -20,6 +20,8 @@ sys.path.insert(
from local import testsuite
from local import utils
from testproc.shard import ShardProc

BASE_DIR = (
  os.path.dirname(
@@ -247,6 +249,12 @@ class BaseTestRunner(object):
                      "directory will be used")
    parser.add_option("-v", "--verbose", help="Verbose output",
                      default=False, action="store_true")
    parser.add_option("--shard-count",
                      help="Split tests into this number of shards",
                      default=1, type="int")
    parser.add_option("--shard-run",
                      help="Run this shard from the split up tests.",
                      default=1, type="int")

  def _add_parser_options(self, parser):
    pass
@@ -490,3 +498,46 @@ class BaseTestRunner(object):
  # TODO(majeski): remove options & args parameters
  def _do_execute(self, suites, args, options):
    raise NotImplementedError()

  def _create_shard_proc(self, options):
    myid, count = self._get_shard_info(options)
    if count == 1:
      return None
    return ShardProc(myid - 1, count)

  def _get_shard_info(self, options):
    """
    Returns pair:
      (id of the current shard [1; number of shards], number of shards)
    """
    # Read gtest shard configuration from environment (e.g. set by swarming).
    # If none is present, use values passed on the command line.
    shard_count = int(
        os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
    shard_run = os.environ.get('GTEST_SHARD_INDEX')
    if shard_run is not None:
      # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
      shard_run = int(shard_run) + 1
    else:
      shard_run = options.shard_run

    if options.shard_count > 1:
      # Log if a value was passed on the cmd line and it differs from the
      # environment variables.
      if options.shard_count != shard_count:  # pragma: no cover
        print("shard_count from cmd line differs from environment variable "
              "GTEST_TOTAL_SHARDS")
      if (options.shard_run > 1 and
          options.shard_run != shard_run):  # pragma: no cover
        print("shard_run from cmd line differs from environment variable "
              "GTEST_SHARD_INDEX")

    if shard_run < 1 or shard_run > shard_count:
      # TODO(machenbach): Turn this into an assert. If that's wrong on the
      # bots, printing will be quite useless. Or refactor this code to make
      # sure we get a return code != 0 after testing if we got here.
      print "shard-run not a valid number, should be in [1:shard-count]"
      print "defaulting back to running all tests"
      return 1, 1

    return shard_run, shard_count
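
For reference, the gtest sharding protocol read above uses a zero-based GTEST_SHARD_INDEX plus a total count in GTEST_TOTAL_SHARDS, whereas v8's --shard-run flag is one-based. Below is a minimal standalone sketch of the same resolution logic; the resolve_shard helper and its explicit env parameter are illustrative only and not part of this patch.

import os

def resolve_shard(cli_shard_run=1, cli_shard_count=1, env=os.environ):
  # GTEST_TOTAL_SHARDS (e.g. exported by swarming) wins over --shard-count.
  shard_count = int(env.get('GTEST_TOTAL_SHARDS', cli_shard_count))
  shard_run = env.get('GTEST_SHARD_INDEX')
  if shard_run is not None:
    # gtest indices are 0-based; v8's shard_run is 1-based.
    shard_run = int(shard_run) + 1
  else:
    shard_run = cli_shard_run
  if shard_run < 1 or shard_run > shard_count:
    # Out-of-range shard id: fall back to running everything.
    return 1, 1
  return shard_run, shard_count

# Example: with GTEST_SHARD_INDEX=2 and GTEST_TOTAL_SHARDS=4 in the environment,
# resolve_shard(env={'GTEST_SHARD_INDEX': '2', 'GTEST_TOTAL_SHARDS': '4'})
# returns (3, 4): this process runs shard 3 of 4.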
@@ -136,12 +136,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
                            " (verbose, dots, color, mono)"),
                      choices=progress.PROGRESS_INDICATORS.keys(),
                      default="mono")
    parser.add_option("--shard-count",
                      help="Split testsuites into this number of shards",
                      default=1, type="int")
    parser.add_option("--shard-run",
                      help="Run this shard from the split up tests.",
                      default=1, type="int")
    parser.add_option("--seed", help="The seed for the random distribution",
                      type="int")
    parser.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -184,21 +178,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
      options.coverage_lift = 0
    return True

  def _shard_tests(self, tests, shard_count, shard_run):
    if shard_count < 2:
      return tests
    if shard_run < 1 or shard_run > shard_count:
      print "shard-run not a valid number, should be in [1:shard-count]"
      print "defaulting back to running all tests"
      return tests
    count = 0
    shard = []
    for test in tests:
      if count % shard_count == shard_run - 1:
        shard.append(test)
      count += 1
    return shard

  def _calculate_n_tests(self, m, options):
    """Calculates the number of tests from m deopt points with exponential
    coverage.
......
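
The _shard_tests helper removed here (the GC fuzzer below carried an identical copy) assigned tests to shards round-robin; ShardProc(myid - 1, count) in the base runner presumably takes over that job. A short self-contained illustration of the same round-robin split, with made-up test names:

def shard_tests(tests, shard_count, shard_run):
  # Mirrors the removed _shard_tests: shard_run is 1-based, so shard N keeps
  # the items at indices N-1, N-1+shard_count, N-1+2*shard_count, ...
  if shard_count < 2:
    return tests
  return [t for i, t in enumerate(tests) if i % shard_count == shard_run - 1]

tests = ['t%d' % i for i in range(7)]
assert shard_tests(tests, 3, 1) == ['t0', 't3', 't6']
assert shard_tests(tests, 3, 2) == ['t1', 't4']
assert shard_tests(tests, 3, 3) == ['t2', 't5']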
@@ -64,12 +64,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
                            " (verbose, dots, color, mono)"),
                      choices=progress.PROGRESS_INDICATORS.keys(),
                      default="mono")
    parser.add_option("--shard-count",
                      help="Split testsuites into this number of shards",
                      default=1, type="int")
    parser.add_option("--shard-run",
                      help="Run this shard from the split up tests.",
                      default=1, type="int")
    parser.add_option("-t", "--timeout", help="Timeout in seconds",
                      default= -1, type="int")
    parser.add_option("--random-seed", default=0,
@@ -102,21 +96,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
    self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
    return True

  def _shard_tests(self, tests, shard_count, shard_run):
    if shard_count < 2:
      return tests
    if shard_run < 1 or shard_run > shard_count:
      print "shard-run not a valid number, should be in [1:shard-count]"
      print "defaulting back to running all tests"
      return tests
    count = 0
    shard = []
    for test in tests:
      if count % shard_count == shard_run - 1:
        shard.append(test)
      count += 1
    return shard

  def _calculate_n_tests(self, m, options):
    """Calculates the number of tests from m points with exponential coverage.
    The coverage is expected to be between 0.0 and 1.0.
......
@@ -33,7 +33,6 @@ from testrunner.testproc.progress import (VerboseProgressIndicator,
                                          ResultsTracker,
                                          TestsCounter)
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.variant import VariantProc
@@ -183,12 +182,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
    parser.add_option("--rerun-failures-max",
                      help="Maximum number of failing test cases to rerun.",
                      default=100, type="int")
    parser.add_option("--shard-count",
                      help="Split testsuites into this number of shards",
                      default=1, type="int")
    parser.add_option("--shard-run",
                      help="Run this shard from the split up tests.",
                      default=1, type="int")
    parser.add_option("--dont-skip-slow-simulator-tests",
                      help="Don't skip more slow tests when using a"
                           " simulator.",
@@ -535,49 +528,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
      count += 1
    return shard

  def _create_shard_proc(self, options):
    myid, count = self._get_shard_info(options)
    if count == 1:
      return None
    return ShardProc(myid - 1, count)

  def _get_shard_info(self, options):
    """
    Returns pair:
      (id of the current shard [1; number of shards], number of shards)
    """
    # Read gtest shard configuration from environment (e.g. set by swarming).
    # If none is present, use values passed on the command line.
    shard_count = int(
        os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
    shard_run = os.environ.get('GTEST_SHARD_INDEX')
    if shard_run is not None:
      # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
      shard_run = int(shard_run) + 1
    else:
      shard_run = options.shard_run

    if options.shard_count > 1:
      # Log if a value was passed on the cmd line and it differs from the
      # environment variables.
      if options.shard_count != shard_count:  # pragma: no cover
        print("shard_count from cmd line differs from environment variable "
              "GTEST_TOTAL_SHARDS")
      if (options.shard_run > 1 and
          options.shard_run != shard_run):  # pragma: no cover
        print("shard_run from cmd line differs from environment variable "
              "GTEST_SHARD_INDEX")

    if shard_run < 1 or shard_run > shard_count:
      # TODO(machenbach): Turn this into an assert. If that's wrong on the
      # bots, printing will be quite useless. Or refactor this code to make
      # sure we get a return code != 0 after testing if we got here.
      print "shard-run not a valid number, should be in [1:shard-count]"
      print "defaulting back to running all tests"
      return 1, 1

    return shard_run, shard_count

  def _run_test_procs(self, suites, args, options, progress_indicator,
                      context):
    jobs = options.j
......
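
Note that _create_shard_proc returns None when only one shard is configured, so callers can simply leave the shard stage out of the test-processor chain. The exact wiring of the processors is outside this diff; the build_procs helper below is a hypothetical sketch of that skip-if-None pattern, not the runner's actual code.

def build_procs(runner, options):
  # Hypothetical assembly of optional test processors (illustration only).
  procs = []
  shard_proc = runner._create_shard_proc(options)
  if shard_proc:  # None when the effective shard count is 1
    procs.append(shard_proc)
  # ... variant, rerun and progress processors would be appended here ...
  return procs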